Merge tag 'iwlwifi-next-for-kalle-2017-12-20' of git://git.kernel.org/pub/scm/linux...
author: Kalle Valo <kvalo@codeaurora.org>
Thu, 11 Jan 2018 16:29:54 +0000 (18:29 +0200)
committer: Kalle Valo <kvalo@codeaurora.org>
Thu, 11 Jan 2018 16:29:54 +0000 (18:29 +0200)
Third batch of updates for v4.16

* Small cleanups in the new rate-scaling code;
* Some improvements in debugging;
* New FW API changes;
* Fix a bug where we got a false-positive warning;
* Fix forced quota debugfs functionality;

944 files changed:
Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
Documentation/devicetree/bindings/net/mediatek-net.txt
Documentation/devicetree/bindings/net/phy.txt
Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/da7218.txt
Documentation/devicetree/bindings/sound/da7219.txt
Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
Documentation/networking/batman-adv.rst
Documentation/networking/xfrm_proc.txt
Documentation/sysctl/net.txt
Documentation/x86/x86_64/mm.txt
MAINTAINERS
Makefile
arch/arm/lib/csumpartialcopyuser.S
arch/arm64/kvm/hyp/debug-sr.c
arch/arm64/net/bpf_jit_comp.c
arch/parisc/boot/compressed/misc.c
arch/parisc/include/asm/thread_info.h
arch/parisc/kernel/entry.S
arch/parisc/kernel/hpmc.S
arch/parisc/kernel/unwind.c
arch/parisc/lib/delay.c
arch/powerpc/include/asm/mmu_context.h
arch/powerpc/kernel/process.c
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/imc-pmu.c
arch/s390/include/asm/diag.h
arch/s390/net/bpf_jit_comp.c
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/sparc/net/bpf_jit_comp_64.c
arch/um/include/asm/mmu_context.h
arch/um/kernel/trap.c
arch/unicore32/include/asm/mmu_context.h
arch/x86/Kconfig
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/entry/vsyscall/vsyscall_64.c
arch/x86/include/asm/cpu_entry_area.h [new file with mode: 0644]
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/desc.h
arch/x86/include/asm/espfix.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/hypervisor.h
arch/x86/include/asm/invpcid.h [new file with mode: 0644]
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/kdebug.h
arch/x86/include/asm/mmu.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/pgtable_32_types.h
arch/x86/include/asm/pgtable_64_types.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/stacktrace.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/traps.h
arch/x86/include/asm/unwind.h
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/doublefault.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/ioport.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/ldt.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
arch/x86/kernel/unwind_orc.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/delay.c
arch/x86/mm/Makefile
arch/x86/mm/cpu_entry_area.c [new file with mode: 0644]
arch/x86/mm/dump_pagetables.c
arch/x86/mm/fault.c
arch/x86/mm/init_32.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/pgtable_32.c
arch/x86/mm/tlb.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/power/cpu.c
arch/x86/xen/enlighten.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/setup.c
block/bio.c
block/blk-map.c
block/blk-throttle.c
block/bounce.c
block/kyber-iosched.c
crypto/af_alg.c
crypto/algif_aead.c
crypto/algif_skcipher.c
crypto/mcryptd.c
crypto/skcipher.c
drivers/acpi/apei/erst.c
drivers/acpi/cppc_acpi.c
drivers/acpi/nfit/core.c
drivers/block/null_blk.c
drivers/clk/clk.c
drivers/clk/sunxi/clk-sun9i-mmc.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/dma/at_hdmac.c
drivers/dma/dma-jz4740.c
drivers/dma/dmatest.c
drivers/dma/fsl-edma.c
drivers/dma/ioat/init.c
drivers/gpio/gpio-reg.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-devprop.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_sw_fence.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lpe_audio.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nouveau_vmm.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/hwmon/hwmon.c
drivers/infiniband/core/security.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/mlx5/cmd.c
drivers/infiniband/hw/mlx5/cmd.h
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/leds/led-core.c
drivers/mfd/cros_ec_spi.c
drivers/mfd/twl4030-audio.c
drivers/mfd/twl6040.c
drivers/misc/pti.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/brcmnand/brcmnand.c
drivers/mtd/nand/gpio.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/net/bonding/bond_options.c
drivers/net/dsa/lan9303-core.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/amazon/ena/ena_eth_com.h
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/amazon/ena/ena_regs_defs.h
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/arc/emac.h
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/cavium/liquidio/octeon_device.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/emac.h
drivers/net/ethernet/ibm/emac/phy.c
drivers/net/ethernet/ibm/emac/rgmii.c
drivers/net/ethernet/ibm/emac/zmii.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/rl.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/bpf/main.h
drivers/net/ethernet/netronome/nfp/bpf/verifier.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/nfp_app.h
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/Kconfig
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_cxt.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_hw.c
drivers/net/ethernet/qlogic/qed/qed_hw.h
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
drivers/net/ethernet/qlogic/qed/qed_init_ops.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_int.h
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.h
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_l2.h
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qed/qed_spq.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qed/qed_sriov.h
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/socionext/Kconfig [new file with mode: 0644]
drivers/net/ethernet/socionext/Makefile [new file with mode: 0644]
drivers/net/ethernet/socionext/sni_ave.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
drivers/net/netdevsim/bpf.c
drivers/net/netdevsim/netdev.c
drivers/net/phy/at803x.c
drivers/net/phy/fixed_phy.c
drivers/net/phy/marvell.c
drivers/net/phy/marvell10g.c
drivers/net/phy/mdio-xgene.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/mdio_device.c
drivers/net/phy/micrel.c
drivers/net/phy/phy-c45.c
drivers/net/phy/phy-core.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp-bus.c
drivers/net/phy/sfp.c
drivers/net/tun.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/Makefile
drivers/net/wireless/ath/ath10k/ahb.c
drivers/net/wireless/ath/ath10k/bmi.c
drivers/net/wireless/ath/ath10k/bmi.h
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/coredump.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/coredump.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/debug.h
drivers/net/wireless/ath/ath10k/debugfs_sta.c
drivers/net/wireless/ath/ath10k/hif.h
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htc.h
drivers/net/wireless/ath/ath10k/htt.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/mac.h
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/rx_desc.h
drivers/net/wireless/ath/ath10k/spectral.c
drivers/net/wireless/ath/ath10k/spectral.h
drivers/net/wireless/ath/ath10k/swap.c
drivers/net/wireless/ath/ath10k/swap.h
drivers/net/wireless/ath/ath10k/targaddrs.h
drivers/net/wireless/ath/ath10k/testmode.c
drivers/net/wireless/ath/ath10k/testmode_i.h
drivers/net/wireless/ath/ath10k/thermal.c
drivers/net/wireless/ath/ath10k/thermal.h
drivers/net/wireless/ath/ath10k/trace.h
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/txrx.h
drivers/net/wireless/ath/ath10k/wmi-ops.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi-tlv.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath10k/wow.c
drivers/net/wireless/ath/ath10k/wow.h
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/pm.c
drivers/net/wireless/ath/wil6210/pmc.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
drivers/net/wireless/ath/wil6210/wil_platform.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/ath/wil6210/wmi.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/mwifiex/cmdevt.c
drivers/net/wireless/marvell/mwifiex/debugfs.c
drivers/net/wireless/marvell/mwifiex/fw.h
drivers/net/wireless/marvell/mwifiex/init.c
drivers/net/wireless/marvell/mwifiex/main.c
drivers/net/wireless/marvell/mwifiex/main.h
drivers/net/wireless/marvell/mwifiex/pcie.c
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
drivers/net/wireless/marvell/mwifiex/sta_event.c
drivers/net/wireless/mediatek/mt76/debugfs.c
drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
drivers/net/wireless/mediatek/mt76/mt76x2_init.c
drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
drivers/net/wireless/mediatek/mt76/mt76x2_main.c
drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
drivers/net/wireless/quantenna/qtnfmac/commands.c
drivers/net/wireless/quantenna/qtnfmac/commands.h
drivers/net/wireless/quantenna/qtnfmac/core.c
drivers/net/wireless/quantenna/qtnfmac/core.h
drivers/net/wireless/quantenna/qtnfmac/event.c
drivers/net/wireless/quantenna/qtnfmac/qlink.h
drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
drivers/net/wireless/quantenna/qtnfmac/util.c
drivers/net/wireless/quantenna/qtnfmac/util.h
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/net/wireless/realtek/rtlwifi/debug.c
drivers/net/wireless/realtek/rtlwifi/debug.h
drivers/net/wireless/realtek/rtlwifi/efuse.c
drivers/net/wireless/realtek/rtlwifi/pci.c
drivers/net/wireless/realtek/rtlwifi/rc.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
drivers/net/wireless/realtek/rtlwifi/wifi.h
drivers/net/wireless/ti/wl1251/main.c
drivers/nvdimm/btt.c
drivers/nvdimm/btt.h
drivers/nvdimm/pfn_devs.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/of/of_mdio.c
drivers/parisc/lba_pci.c
drivers/pci/pci-driver.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/s390/net/Kconfig
drivers/s390/net/lcs.c
drivers/s390/net/lcs.h
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_l2.h
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3.h
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/linit.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/qedf/drv_fcoe_fw_funcs.c
drivers/scsi/qedf/drv_fcoe_fw_funcs.h
drivers/scsi/qedf/qedf.h
drivers/scsi/qedf/qedf_els.c
drivers/scsi/qedf/qedf_hsi.h
drivers/scsi/qedf/qedf_io.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qedf/qedf_version.h
drivers/scsi/qedi/qedi_debugfs.c
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_fw_api.c
drivers/scsi/qedi/qedi_fw_iscsi.h
drivers/scsi/qedi/qedi_gbl.h
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/qedi/qedi_iscsi.h
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qedi/qedi_version.h
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_spi.c
drivers/spi/spi-armada-3700.c
drivers/spi/spi-atmel.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sun4i.c
drivers/spi/spi-xilinx.c
drivers/target/target_core_pscsi.c
drivers/xen/balloon.c
fs/cramfs/Kconfig
fs/exec.c
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/namespace.c
fs/nsfs.c
fs/super.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/libxfs/xfs_attr_leaf.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_defer.c
fs/xfs/libxfs/xfs_defer.h
fs/xfs/libxfs/xfs_iext_tree.c
fs/xfs/libxfs/xfs_refcount.c
fs/xfs/libxfs/xfs_rmap.c
fs/xfs/libxfs/xfs_rmap.h
fs/xfs/xfs_extfree_item.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_icache.h
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_reflink.c
fs/xfs/xfs_super.c
include/asm-generic/mm_hooks.h
include/asm-generic/pgtable.h
include/crypto/mcryptd.h
include/kvm/arm_arch_timer.h
include/linux/bio.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/bpf_types.h
include/linux/bpf_verifier.h
include/linux/dsa/lan9303.h
include/linux/filter.h
include/linux/intel-pti.h [moved from include/linux/pti.h with 94% similarity]
include/linux/ipv6.h
include/linux/mdio.h
include/linux/mfd/rtsx_pci.h
include/linux/mlx5/driver.h
include/linux/mlx5/fs.h
include/linux/mlx5/mlx5_ifc.h
include/linux/netdevice.h
include/linux/phy.h
include/linux/phy_fixed.h
include/linux/proc_ns.h
include/linux/ptr_ring.h
include/linux/qed/common_hsi.h
include/linux/qed/eth_common.h
include/linux/qed/fcoe_common.h
include/linux/qed/iscsi_common.h
include/linux/qed/iwarp_common.h
include/linux/qed/qed_eth_if.h
include/linux/qed/qed_if.h
include/linux/qed/qed_iscsi_if.h
include/linux/qed/qed_ll2_if.h
include/linux/qed/rdma_common.h
include/linux/qed/roce_common.h
include/linux/qed/storage_common.h
include/linux/qed/tcp_common.h
include/linux/rtnetlink.h
include/linux/sfp.h
include/linux/spi/spi.h
include/net/cfg80211.h
include/net/inet_sock.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/mac80211.h
include/net/pkt_cls.h
include/net/pkt_sched.h
include/net/sch_generic.h
include/net/sock.h
include/net/xdp.h [new file with mode: 0644]
include/net/xfrm.h
include/trace/events/clk.h
include/trace/events/kvm.h
include/trace/events/net_probe_common.h [new file with mode: 0644]
include/trace/events/sctp.h [new file with mode: 0644]
include/trace/events/sock.h
include/trace/events/tcp.h
include/uapi/linux/batadv_packet.h [moved from net/batman-adv/packet.h with 80% similarity]
include/uapi/linux/batman_adv.h
include/uapi/linux/bpf.h
include/uapi/linux/inet_diag.h
include/uapi/linux/l2tp.h
include/uapi/linux/nl80211.h
include/xen/balloon.h
init/main.c
kernel/bpf/Makefile
kernel/bpf/cgroup.c
kernel/bpf/core.c
kernel/bpf/disasm.c
kernel/bpf/disasm.h
kernel/bpf/offload.c
kernel/bpf/sockmap.c
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/fork.c
kernel/time/posix-timers.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
lib/test_bpf.c
mm/backing-dev.c
net/Kconfig
net/batman-adv/Kconfig
net/batman-adv/Makefile
net/batman-adv/bat_algo.c
net/batman-adv/bat_algo.h
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_iv_ogm.h
net/batman-adv/bat_v.c
net/batman-adv/bat_v.h
net/batman-adv/bat_v_elp.c
net/batman-adv/bat_v_elp.h
net/batman-adv/bat_v_ogm.c
net/batman-adv/bat_v_ogm.h
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/debugfs.h
net/batman-adv/distributed-arp-table.c
net/batman-adv/distributed-arp-table.h
net/batman-adv/fragmentation.c
net/batman-adv/fragmentation.h
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/gateway_common.h
net/batman-adv/hard-interface.c
net/batman-adv/hard-interface.h
net/batman-adv/hash.c
net/batman-adv/hash.h
net/batman-adv/icmp_socket.c
net/batman-adv/icmp_socket.h
net/batman-adv/log.c
net/batman-adv/log.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.c
net/batman-adv/multicast.h
net/batman-adv/netlink.c
net/batman-adv/netlink.h
net/batman-adv/network-coding.c
net/batman-adv/network-coding.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/sysfs.c
net/batman-adv/sysfs.h
net/batman-adv/tp_meter.c
net/batman-adv/tp_meter.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/tvlv.c
net/batman-adv/tvlv.h
net/batman-adv/types.h
net/bridge/br_netlink.c
net/bridge/br_sysfs_br.c
net/core/Makefile
net/core/dev.c
net/core/filter.c
net/core/flow_dissector.c
net/core/net_namespace.c
net/core/skbuff.c
net/core/xdp.c [new file with mode: 0644]
net/dccp/Kconfig
net/dccp/Makefile
net/dccp/ackvec.c
net/dccp/probe.c [deleted file]
net/dccp/proto.c
net/dccp/trace.h [new file with mode: 0644]
net/dsa/dsa_priv.h
net/dsa/tag_brcm.c
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/tcp.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_probe.c [deleted file]
net/ipv4/udp.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ipv6_sockglue.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_mode_tunnel.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_debugfs.c
net/l2tp/l2tp_netlink.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/debugfs.c
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rx.c
net/mac80211/tdls.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/wme.c
net/mac80211/wpa.c
net/openvswitch/conntrack.c
net/openvswitch/flow.c
net/packet/af_packet.c
net/rds/bind.c
net/rds/cong.c
net/rds/connection.c
net/rds/rds.h
net/rds/send.c
net/rds/tcp.c
net/rds/tcp_connect.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/rds/threads.c
net/sched/act_police.c
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_cbs.c
net/sched/sch_choke.c
net/sched/sch_codel.c
net/sched/sch_drr.c
net/sched/sch_dsmark.c
net/sched/sch_fifo.c
net/sched/sch_fq.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_hfsc.c
net/sched/sch_hhf.c
net/sched/sch_htb.c
net/sched/sch_ingress.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_pie.c
net/sched/sch_plug.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_tbf.c
net/sched/sch_teql.c
net/sctp/Kconfig
net/sctp/Makefile
net/sctp/debug.c
net/sctp/endpointola.c
net/sctp/probe.c [deleted file]
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/ulpqueue.c
net/strparser/strparser.c
net/tipc/bearer.c
net/tipc/group.c
net/tipc/monitor.c
net/tipc/socket.c
net/wireless/Makefile
net/wireless/certs/sforshee.hex [new file with mode: 0644]
net/wireless/certs/sforshee.x509 [deleted file]
net/wireless/ibss.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/scan.c
net/wireless/trace.h
net/xfrm/xfrm_device.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
samples/bpf/Makefile
samples/bpf/xdp_rxq_info_kern.c [new file with mode: 0644]
samples/bpf/xdp_rxq_info_user.c [new file with mode: 0644]
sound/core/rawmidi.c
sound/hda/hdac_i915.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/acp-pcm-dma.c
sound/soc/atmel/Kconfig
sound/soc/codecs/da7218.c
sound/soc/codecs/msm8916-wcd-analog.c
sound/soc/codecs/msm8916-wcd-digital.c
sound/soc/codecs/nau8825.c
sound/soc/codecs/rt5514-spi.c
sound/soc/codecs/rt5514.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5663.c
sound/soc/codecs/rt5663.h
sound/soc/codecs/tlv320aic31xx.h
sound/soc/codecs/twl4030.c
sound/soc/codecs/wm_adsp.c
sound/soc/fsl/fsl_asrc.h
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/boards/kbl_rt5663_max98927.c
sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
sound/soc/intel/skylake/skl-nhlt.c
sound/soc/intel/skylake/skl-topology.c
sound/soc/rockchip/rockchip_spdif.c
sound/soc/sh/rcar/adg.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/dma.c
sound/soc/sh/rcar/ssi.c
sound/soc/sh/rcar/ssiu.c
sound/usb/mixer.c
sound/usb/quirks.c
tools/arch/s390/include/uapi/asm/bpf_perf_event.h
tools/bpf/Makefile
tools/bpf/bpf_jit_disasm.c
tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
tools/bpf/bpftool/Documentation/bpftool-map.rst
tools/bpf/bpftool/Documentation/bpftool-prog.rst
tools/bpf/bpftool/Documentation/bpftool.rst
tools/bpf/bpftool/Makefile
tools/bpf/bpftool/bash-completion/bpftool
tools/bpf/bpftool/cgroup.c
tools/bpf/bpftool/common.c
tools/bpf/bpftool/jit_disasm.c
tools/bpf/bpftool/main.c
tools/bpf/bpftool/main.h
tools/bpf/bpftool/map.c
tools/bpf/bpftool/prog.c
tools/build/feature/Makefile
tools/build/feature/test-disassembler-four-args.c [new file with mode: 0644]
tools/include/uapi/linux/bpf.h
tools/kvm/kvm_stat/kvm_stat
tools/kvm/kvm_stat/kvm_stat.txt
tools/lib/bpf/libbpf.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/config
tools/testing/selftests/bpf/test_dev_cgroup.c
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/test_stacktrace_map.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/config
tools/testing/selftests/net/fib_tests.sh [new file with mode: 0755]
tools/testing/selftests/net/msg_zerocopy.c
tools/testing/selftests/net/rtnetlink.sh
tools/testing/selftests/x86/ldt_gdt.c
virt/kvm/arm/arch_timer.c
virt/kvm/arm/arm.c
virt/kvm/arm/mmio.c
virt/kvm/arm/mmu.c

index 376fa2f..956bb04 100644 (file)
@@ -13,7 +13,6 @@ Required properties:
                  at25df321a
                  at25df641
                  at26df081a
-                 en25s64
                  mr25h128
                  mr25h256
                  mr25h10
@@ -33,7 +32,6 @@ Required properties:
                  s25fl008k
                  s25fl064k
                  sst25vf040b
-                 sst25wf040b
                  m25p40
                  m25p80
                  m25p16
index 214eaa9..53c13ee 100644 (file)
@@ -28,7 +28,7 @@ Required properties:
 - mediatek,sgmiisys: phandle to the syscon node that handles the SGMII setup
        which is required for those SoCs equipped with SGMII such as MT7622 SoC.
 - mediatek,pctl: phandle to the syscon node that handles the ports slew rate
-       and driver current
+       and driver current: only for MT2701 and MT7623 SoC
 
 Optional properties:
 - interrupt-parent: Should be the phandle for the interrupt controller
index 72860ce..d2169a5 100644 (file)
@@ -55,10 +55,10 @@ Optional Properties:
 
 - reset-gpios: The GPIO phandle and specifier for the PHY reset signal.
 
-- reset-delay-us: Delay after the reset was asserted in microseconds.
+- reset-assert-us: Delay after the reset was asserted in microseconds.
   If this property is missing the delay will be skipped.
 
-- reset-post-delay-us: Delay after the reset was deasserted in microseconds.
+- reset-deassert-us: Delay after the reset was deasserted in microseconds.
   If this property is missing the delay will be skipped.
 
 Example:
@@ -70,6 +70,6 @@ ethernet-phy@0 {
        reg = <0>;
 
        reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
-       reset-delay-us = <1000>;
-       reset-post-delay-us = <2000>;
+       reset-assert-us = <1000>;
+       reset-deassert-us = <2000>;
 };
diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
new file mode 100644 (file)
index 0000000..270ea4e
--- /dev/null
@@ -0,0 +1,48 @@
+* Socionext AVE ethernet controller
+
+This describes the devicetree bindings for AVE ethernet controller
+implemented on Socionext UniPhier SoCs.
+
+Required properties:
+ - compatible: Should be
+       - "socionext,uniphier-pro4-ave4" : for Pro4 SoC
+       - "socionext,uniphier-pxs2-ave4" : for PXs2 SoC
+       - "socionext,uniphier-ld11-ave4" : for LD11 SoC
+       - "socionext,uniphier-ld20-ave4" : for LD20 SoC
+ - reg: Address where registers are mapped and size of region.
+ - interrupts: Should contain the MAC interrupt.
+ - phy-mode: See ethernet.txt in the same directory. Allow to choose
+       "rgmii", "rmii", or "mii" according to the PHY.
+ - phy-handle: Should point to the external phy device.
+       See ethernet.txt file in the same directory.
+ - clocks: A phandle to the clock for the MAC.
+
+Optional properties:
+ - resets: A phandle to the reset control for the MAC.
+ - local-mac-address: See ethernet.txt in the same directory.
+
+Required subnode:
+ - mdio: A container for child nodes representing phy nodes.
+         See phy.txt in the same directory.
+
+Example:
+
+       ether: ethernet@65000000 {
+               compatible = "socionext,uniphier-ld20-ave4";
+               reg = <0x65000000 0x8500>;
+               interrupts = <0 66 4>;
+               phy-mode = "rgmii";
+               phy-handle = <&ethphy>;
+               clocks = <&sys_clk 6>;
+               resets = <&sys_rst 6>;
+               local-mac-address = [00 00 00 00 00 00];
+
+               mdio {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       ethphy: ethphy@1 {
+                               reg = <1>;
+                       };
+               };
+       };
index 5ca5a70..3ab9dfe 100644 (file)
@@ -73,7 +73,7 @@ Example:
                compatible = "dlg,da7218";
                reg = <0x1a>;
                interrupt-parent = <&gpio6>;
-               interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
                wakeup-source;
 
                VDD-supply = <&reg_audio>;
index cf61681..5b54d2d 100644 (file)
@@ -77,7 +77,7 @@ Example:
                reg = <0x1a>;
 
                interrupt-parent = <&gpio6>;
-               interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
 
                VDD-supply = <&reg_audio>;
                VDDMIC-supply = <&reg_audio>;
index 5bf1396..e3c48b2 100644 (file)
@@ -12,24 +12,30 @@ Required properties:
   - "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc
 - reg : Offset and length of the register set for the device
 - interrupts : Should contain CSPI/eCSPI interrupt
-- cs-gpios : Specifies the gpio pins to be used for chipselects.
 - clocks : Clock specifiers for both ipg and per clocks.
 - clock-names : Clock names should include both "ipg" and "per"
 See the clock consumer binding,
        Documentation/devicetree/bindings/clock/clock-bindings.txt
-- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
-               Documentation/devicetree/bindings/dma/dma.txt
-- dma-names: DMA request names should include "tx" and "rx" if present.
 
-Obsolete properties:
-- fsl,spi-num-chipselects : Contains the number of the chipselect
+Recommended properties:
+- cs-gpios : GPIOs to use as chip selects, see spi-bus.txt.  While the native chip
+select lines can be used, they appear to always generate a pulse between each
+word of a transfer.  Most use cases will require GPIO based chip selects to
+generate a valid transaction.
 
 Optional properties:
+- num-cs :  Number of total chip selects, see spi-bus.txt.
+- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
+Documentation/devicetree/bindings/dma/dma.txt.
+- dma-names: DMA request names, if present, should include "tx" and "rx".
 - fsl,spi-rdy-drctl: Integer, representing the value of DRCTL, the register
 controlling the SPI_READY handling. Note that to enable the DRCTL consideration,
 the SPI_READY mode-flag needs to be set too.
 Valid values are: 0 (disabled), 1 (edge-triggered burst) and 2 (level-triggered burst).
 
+Obsolete properties:
+- fsl,spi-num-chipselects : Contains the number of the chipselect
+
 Example:
 
 ecspi@70010000 {
index a342b2c..245fb6c 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ==========
 batman-adv
 ==========
index d0d8baf..2eae619 100644 (file)
@@ -5,13 +5,15 @@ Masahide NAKAMURA <nakam@linux-ipv6.org>
 
 Transformation Statistics
 -------------------------
-xfrm_proc is a statistics shown factor dropped by transformation
-for developer.
-It is a counter designed from current transformation source code
-and defined like linux private MIB.
 
-Inbound statistics
-~~~~~~~~~~~~~~~~~~
+The xfrm_proc code is a set of statistics showing numbers of packets
+dropped by the transformation code and why.  These counters are defined
+as part of the linux private MIB.  These counters can be viewed in
+/proc/net/xfrm_stat.
+
+
+Inbound errors
+~~~~~~~~~~~~~~
 XfrmInError:
        All errors which is not matched others
 XfrmInBufferError:
@@ -46,6 +48,10 @@ XfrmInPolBlock:
        Policy discards
 XfrmInPolError:
        Policy error
+XfrmAcquireError:
+       State hasn't been fully acquired before use
+XfrmFwdHdrError:
+       Forward routing of a packet is not allowed
 
 Outbound errors
 ~~~~~~~~~~~~~~~
@@ -72,3 +78,5 @@ XfrmOutPolDead:
        Policy is dead
 XfrmOutPolError:
        Policy error
+XfrmOutStateInvalid:
+       State is invalid, perhaps expired
index b67044a..35c62f5 100644 (file)
@@ -95,7 +95,9 @@ dev_weight
 --------------
 
 The maximum number of packets that kernel can handle on a NAPI interrupt,
-it's a Per-CPU variable.
+it's a Per-CPU variable. For drivers that support LRO or GRO_HW, a hardware
+aggregated packet is counted as one packet in this context.
+
 Default: 64
 
 dev_weight_rx_bias
index 3448e67..5110170 100644 (file)
@@ -1,6 +1,4 @@
 
-<previous description obsolete, deleted>
-
 Virtual memory map with 4 level page tables:
 
 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
@@ -14,13 +12,15 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
 ... unused hole ...
+fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffff9fffffff (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space (variable)
-ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
+ffffffffa0000000 - [fixmap start]   (~1526 MB) module mapping space (variable)
+[fixmap start]   - ffffffffff5fffff kernel-internal fixmap range
+ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
 Virtual memory map with 5 level page tables:
@@ -36,19 +36,22 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
 ... unused hole ...
 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
 ... unused hole ...
+fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffff9fffffff (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
-ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
+ffffffffa0000000 - [fixmap start]   (~1526 MB) module mapping space
+[fixmap start]   - ffffffffff5fffff kernel-internal fixmap range
+ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
 Architecture defines a 64-bit virtual address. Implementations can support
 less. Currently supported are 48- and 57-bit virtual addresses. Bits 63
-through to the most-significant implemented bit are set to either all ones
-or all zero. This causes hole between user space and kernel addresses.
+through to the most-significant implemented bit are sign extended.
+This causes hole between user space and kernel addresses if you interpret them
+as unsigned.
 
 The direct mapping covers all memory in the system up to the highest
 memory address (this means in some cases it can also include PCI memory
@@ -58,9 +61,6 @@ vmalloc space is lazily synchronized into the different PML4/PML5 pages of
 the processes using the page fault handler, with init_top_pgt as
 reference.
 
-Current X86-64 implementations support up to 46 bits of address space (64 TB),
-which is our current limit. This expands into MBZ space in the page tables.
-
 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
 memory window (this size is arbitrary, it can be raised later if needed).
 The mappings are not part of any other kernel PGD and are only available
@@ -72,5 +72,3 @@ following fixmap section.
 Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
-
--Andi Kleen, Jul 2004
index 129c591..f3af094 100644 (file)
@@ -2564,6 +2564,7 @@ S:        Maintained
 F:     Documentation/ABI/testing/sysfs-class-net-batman-adv
 F:     Documentation/ABI/testing/sysfs-class-net-mesh
 F:     Documentation/networking/batman-adv.rst
+F:     include/uapi/linux/batadv_packet.h
 F:     include/uapi/linux/batman_adv.h
 F:     net/batman-adv/
 
@@ -11775,15 +11776,13 @@ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.g
 S:     Maintained
 F:     drivers/net/wireless/realtek/rtl818x/rtl8187/
 
-RTL8192CE WIRELESS DRIVER
-M:     Larry Finger <Larry.Finger@lwfinger.net>
-M:     Chaoming Li <chaoming_li@realsil.com.cn>
+REALTEK WIRELESS DRIVER (rtlwifi family)
+M:     Ping-Ke Shih <pkshih@realtek.com>
 L:     linux-wireless@vger.kernel.org
 W:     http://wireless.kernel.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
 S:     Maintained
 F:     drivers/net/wireless/realtek/rtlwifi/
-F:     drivers/net/wireless/realtek/rtlwifi/rtl8192ce/
 
 RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
 M:     Jes Sorensen <Jes.Sorensen@gmail.com>
index 3f4d157..ac8c441 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index 1712f13..b83fdc0 100644 (file)
                .pushsection .text.fixup,"ax"
                .align  4
 9001:          mov     r4, #-EFAULT
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+               ldr     r5, [sp, #9*4]          @ *err_ptr
+#else
                ldr     r5, [sp, #8*4]          @ *err_ptr
+#endif
                str     r4, [r5]
                ldmia   sp, {r1, r2}            @ retrieve dst, len
                add     r2, r2, r1
index 321c9c0..f4363d4 100644 (file)
@@ -74,6 +74,9 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
 {
        u64 reg;
 
+       /* Clear pmscr in case of early return */
+       *pmscr_el1 = 0;
+
        /* SPE present on this CPU? */
        if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
                                                  ID_AA64DFR0_PMSVER_SHIFT))
index 396490c..acaa935 100644 (file)
@@ -897,6 +897,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                image_ptr = jit_data->image;
                header = jit_data->header;
                extra_pass = true;
+               image_size = sizeof(u32) * ctx.idx;
                goto skip_init_ctx;
        }
        memset(&ctx, 0, sizeof(ctx));
index 9345b44..f57118e 100644 (file)
@@ -123,8 +123,8 @@ int puts(const char *s)
        while ((nuline = strchr(s, '\n')) != NULL) {
                if (nuline != s)
                        pdc_iodc_print(s, nuline - s);
-                       pdc_iodc_print("\r\n", 2);
-                       s = nuline + 1;
+               pdc_iodc_print("\r\n", 2);
+               s = nuline + 1;
        }
        if (*s != '\0')
                pdc_iodc_print(s, strlen(s));
index c980a02..598c8d6 100644 (file)
@@ -35,7 +35,12 @@ struct thread_info {
 
 /* thread information allocation */
 
+#ifdef CONFIG_IRQSTACKS
+#define THREAD_SIZE_ORDER      2 /* PA-RISC requires at least 16k stack */
+#else
 #define THREAD_SIZE_ORDER      3 /* PA-RISC requires at least 32k stack */
+#endif
+
 /* Be sure to hunt all references to this down when you change the size of
  * the kernel stack */
 #define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
index a4fd296..f3cecf5 100644 (file)
@@ -878,9 +878,6 @@ ENTRY_CFI(syscall_exit_rfi)
        STREG   %r19,PT_SR7(%r16)
 
 intr_return:
-       /* NOTE: Need to enable interrupts incase we schedule. */
-       ssm     PSW_SM_I, %r0
-
        /* check for reschedule */
        mfctl   %cr30,%r1
        LDREG   TI_FLAGS(%r1),%r19      /* sched.h: TIF_NEED_RESCHED */
@@ -907,6 +904,11 @@ intr_check_sig:
        LDREG   PT_IASQ1(%r16), %r20
        cmpib,COND(=),n 0,%r20,intr_restore /* backward */
 
+       /* NOTE: We need to enable interrupts if we have to deliver
+        * signals. We used to do this earlier but it caused kernel
+        * stack overflows. */
+       ssm     PSW_SM_I, %r0
+
        copy    %r0, %r25                       /* long in_syscall = 0 */
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29                  /* Reference param save area */
@@ -958,6 +960,10 @@ intr_do_resched:
        cmpib,COND(=)   0, %r20, intr_do_preempt
        nop
 
+       /* NOTE: We need to enable interrupts if we schedule.  We used
+        * to do this earlier but it caused kernel stack overflows. */
+       ssm     PSW_SM_I, %r0
+
 #ifdef CONFIG_64BIT
        ldo     -16(%r30),%r29          /* Reference param save area */
 #endif
index e3a8e5e..8d072c4 100644 (file)
@@ -305,6 +305,7 @@ ENDPROC_CFI(os_hpmc)
 
 
        __INITRODATA
+       .align 4
        .export os_hpmc_size
 os_hpmc_size:
        .word .os_hpmc_end-.os_hpmc
index 5a65798..143f90e 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/kallsyms.h>
 #include <linux/sort.h>
-#include <linux/sched.h>
 
 #include <linux/uaccess.h>
 #include <asm/assembly.h>
index 7eab4bb..66e5065 100644 (file)
@@ -16,9 +16,7 @@
 #include <linux/preempt.h>
 #include <linux/init.h>
 
-#include <asm/processor.h>
 #include <asm/delay.h>
-
 #include <asm/special_insns.h>    /* for mfctl() */
 #include <asm/processor.h> /* for boot_cpu_data */
 
index 6177d43..e2a2b84 100644 (file)
@@ -160,9 +160,10 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 #endif
 }
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-                                struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+                               struct mm_struct *mm)
 {
+       return 0;
 }
 
 #ifndef CONFIG_PPC_BOOK3S_64
index 5acb5a1..72be0c3 100644 (file)
@@ -1403,7 +1403,7 @@ void show_regs(struct pt_regs * regs)
 
        printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
-       printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
+       printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR:  "REG" ", regs->msr);
        print_msr_bits(regs->msr);
index bf45784..0d750d2 100644 (file)
@@ -725,7 +725,8 @@ u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
 
        /* Return the per-cpu state for state saving/migration */
        return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
-              (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT;
+              (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
+              (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
 }
 
 int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
@@ -1558,7 +1559,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
 
        /*
         * Restore P and Q. If the interrupt was pending, we
-        * force both P and Q, which will trigger a resend.
+        * force Q and !P, which will trigger a resend.
         *
         * That means that a guest that had both an interrupt
         * pending (queued) and Q set will restore with only
@@ -1566,7 +1567,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
         * is perfectly fine as coalescing interrupts that haven't
         * been presented yet is always allowed.
         */
-       if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
+       if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
                state->old_p = true;
        if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
                state->old_q = true;
index d5a5bc4..6771c63 100644 (file)
@@ -763,7 +763,8 @@ emit_clear:
                        func = (u8 *) __bpf_call_base + imm;
 
                        /* Save skb pointer if we need to re-cache skb data */
-                       if (bpf_helper_changes_pkt_data(func))
+                       if ((ctx->seen & SEEN_SKB) &&
+                           bpf_helper_changes_pkt_data(func))
                                PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
 
                        bpf_jit_emit_func_call(image, ctx, (u64)func);
@@ -772,7 +773,8 @@ emit_clear:
                        PPC_MR(b2p[BPF_REG_0], 3);
 
                        /* refresh skb cache */
-                       if (bpf_helper_changes_pkt_data(func)) {
+                       if ((ctx->seen & SEEN_SKB) &&
+                           bpf_helper_changes_pkt_data(func)) {
                                /* reload skb pointer to r3 */
                                PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
                                bpf_jit_emit_skb_loads(image, ctx);
index 1538129..fce5457 100644 (file)
@@ -410,8 +410,12 @@ static __u64 power_pmu_bhrb_to(u64 addr)
        int ret;
        __u64 target;
 
-       if (is_kernel_addr(addr))
-               return branch_target((unsigned int *)addr);
+       if (is_kernel_addr(addr)) {
+               if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
+                       return 0;
+
+               return branch_target(&instr);
+       }
 
        /* Userspace: need copy instruction here then translate it */
        pagefault_disable();
index 0ead3cd..be4e7f8 100644 (file)
@@ -310,6 +310,19 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
                return 0;
 
        /*
+        * Check whether nest_imc is registered. We could end up here if the
+        * cpuhotplug callback registration fails. i.e, callback invokes the
+        * offline path for all successfully registered nodes. At this stage,
+        * nest_imc pmu will not be registered and we should return here.
+        *
+        * We return with a zero since this is not an offline failure. And
+        * cpuhp_setup_state() returns the actual failure reason to the caller,
+        * which in turn will call the cleanup routine.
+        */
+       if (!nest_pmus)
+               return 0;
+
+       /*
         * Now that this cpu is one of the designated,
         * find a next cpu a) which is online and b) in same chip.
         */
@@ -1171,6 +1184,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
                if (nest_pmus == 1) {
                        cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
                        kfree(nest_imc_refc);
+                       kfree(per_nest_pmu_arr);
                }
 
                if (nest_pmus > 0)
@@ -1195,7 +1209,6 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
                kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
        kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
        kfree(pmu_ptr);
-       kfree(per_nest_pmu_arr);
        return;
 }
 
@@ -1309,6 +1322,8 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                        ret = nest_pmu_cpumask_init();
                        if (ret) {
                                mutex_unlock(&nest_init_lock);
+                               kfree(nest_imc_refc);
+                               kfree(per_nest_pmu_arr);
                                goto err_free;
                        }
                }
index 6db7856..cdbaad5 100644 (file)
@@ -229,13 +229,55 @@ struct diag204_x_phys_block {
 } __packed;
 
 enum diag26c_sc {
+       DIAG26C_PORT_VNIC    = 0x00000024,
        DIAG26C_MAC_SERVICES = 0x00000030
 };
 
 enum diag26c_version {
-       DIAG26C_VERSION2 = 0x00000002   /* z/VM 5.4.0 */
+       DIAG26C_VERSION2         = 0x00000002,  /* z/VM 5.4.0 */
+       DIAG26C_VERSION6_VM65918 = 0x00020006   /* z/VM 6.4.0 + VM65918 */
 };
 
+#define DIAG26C_VNIC_INFO      0x0002
+struct diag26c_vnic_req {
+       u32     resp_buf_len;
+       u32     resp_version;
+       u16     req_format;
+       u16     vlan_id;
+       u64     sys_name;
+       u8      res[2];
+       u16     devno;
+} __packed __aligned(8);
+
+#define VNIC_INFO_PROT_L3      1
+#define VNIC_INFO_PROT_L2      2
+/* Note: this is the bare minimum, use it for uninitialized VNICs only. */
+struct diag26c_vnic_resp {
+       u32     version;
+       u32     entry_cnt;
+       /* VNIC info: */
+       u32     next_entry;
+       u64     owner;
+       u16     devno;
+       u8      status;
+       u8      type;
+       u64     lan_owner;
+       u64     lan_name;
+       u64     port_name;
+       u8      port_type;
+       u8      ext_status:6;
+       u8      protocol:2;
+       u16     base_devno;
+       u32     port_num;
+       u32     ifindex;
+       u32     maxinfo;
+       u32     dev_count;
+       /* 3x device info: */
+       u8      dev_info1[28];
+       u8      dev_info2[28];
+       u8      dev_info3[28];
+} __packed __aligned(8);
+
 #define DIAG26C_GET_MAC        0x0000
 struct diag26c_mac_req {
        u32     resp_buf_len;
index f4baa8c..1dfadbd 100644 (file)
@@ -55,8 +55,7 @@ struct bpf_jit {
 #define SEEN_LITERAL   8       /* code uses literals */
 #define SEEN_FUNC      16      /* calls C functions */
 #define SEEN_TAIL_CALL 32      /* code uses tail calls */
-#define SEEN_SKB_CHANGE        64      /* code changes skb data */
-#define SEEN_REG_AX    128     /* code uses constant blinding */
+#define SEEN_REG_AX    64      /* code uses constant blinding */
 #define SEEN_STACK     (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
@@ -448,12 +447,12 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
                        EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
                                      REG_15, 152);
        }
-       if (jit->seen & SEEN_SKB)
+       if (jit->seen & SEEN_SKB) {
                emit_load_skb_data_hlen(jit);
-       if (jit->seen & SEEN_SKB_CHANGE)
                /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
                EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
                              STK_OFF_SKBP);
+       }
 }
 
 /*
@@ -983,8 +982,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                EMIT2(0x0d00, REG_14, REG_W1);
                /* lgr %b0,%r2: load return value into %b0 */
                EMIT4(0xb9040000, BPF_REG_0, REG_2);
-               if (bpf_helper_changes_pkt_data((void *)func)) {
-                       jit->seen |= SEEN_SKB_CHANGE;
+               if ((jit->seen & SEEN_SKB) &&
+                   bpf_helper_changes_pkt_data((void *)func)) {
                        /* lg %b1,ST_OFF_SKBP(%r15) */
                        EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
                                      REG_15, STK_OFF_SKBP);
index be3136f..a8103a8 100644 (file)
@@ -113,7 +113,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
index 815c03d..41363f4 100644 (file)
@@ -154,7 +154,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
index a2f1b5e..635fdef 100644 (file)
@@ -1245,14 +1245,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                u8 *func = ((u8 *)__bpf_call_base) + imm;
 
                ctx->saw_call = true;
+               if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
+                       emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx);
 
                emit_call((u32 *)func, ctx);
                emit_nop(ctx);
 
                emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
 
-               if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind)
-                       load_skb_regs(ctx, bpf2sparc[BPF_REG_6]);
+               if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
+                       load_skb_regs(ctx, L7);
                break;
        }
 
@@ -1507,11 +1509,19 @@ static void jit_fill_hole(void *area, unsigned int size)
                *ptr++ = 0x91d02005; /* ta 5 */
 }
 
+struct sparc64_jit_data {
+       struct bpf_binary_header *header;
+       u8 *image;
+       struct jit_ctx ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
        struct bpf_prog *tmp, *orig_prog = prog;
+       struct sparc64_jit_data *jit_data;
        struct bpf_binary_header *header;
        bool tmp_blinded = false;
+       bool extra_pass = false;
        struct jit_ctx ctx;
        u32 image_size;
        u8 *image_ptr;
@@ -1531,13 +1541,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                prog = tmp;
        }
 
+       jit_data = prog->aux->jit_data;
+       if (!jit_data) {
+               jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+               if (!jit_data) {
+                       prog = orig_prog;
+                       goto out;
+               }
+               prog->aux->jit_data = jit_data;
+       }
+       if (jit_data->ctx.offset) {
+               ctx = jit_data->ctx;
+               image_ptr = jit_data->image;
+               header = jit_data->header;
+               extra_pass = true;
+               image_size = sizeof(u32) * ctx.idx;
+               goto skip_init_ctx;
+       }
+
        memset(&ctx, 0, sizeof(ctx));
        ctx.prog = prog;
 
        ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
        if (ctx.offset == NULL) {
                prog = orig_prog;
-               goto out;
+               goto out_off;
        }
 
        /* Fake pass to detect features used, and get an accurate assessment
@@ -1560,7 +1588,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
        }
 
        ctx.image = (u32 *)image_ptr;
-
+skip_init_ctx:
        for (pass = 1; pass < 3; pass++) {
                ctx.idx = 0;
 
@@ -1591,14 +1619,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
        bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
 
-       bpf_jit_binary_lock_ro(header);
+       if (!prog->is_func || extra_pass) {
+               bpf_jit_binary_lock_ro(header);
+       } else {
+               jit_data->ctx = ctx;
+               jit_data->image = image_ptr;
+               jit_data->header = header;
+       }
 
        prog->bpf_func = (void *)ctx.image;
        prog->jited = 1;
        prog->jited_len = image_size;
 
+       if (!prog->is_func || extra_pass) {
 out_off:
-       kfree(ctx.offset);
+               kfree(ctx.offset);
+               kfree(jit_data);
+               prog->aux->jit_data = NULL;
+       }
 out:
        if (tmp_blinded)
                bpf_jit_prog_release_other(prog, prog == orig_prog ?
index b668e35..fca34b2 100644 (file)
@@ -15,9 +15,10 @@ extern void uml_setup_stubs(struct mm_struct *mm);
 /*
  * Needed since we do not use the asm-generic/mm_hooks.h:
  */
-static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
        uml_setup_stubs(mm);
+       return 0;
 }
 extern void arch_exit_mmap(struct mm_struct *mm);
 static inline void arch_unmap(struct mm_struct *mm,
index 4e6fcb3..4286441 100644 (file)
@@ -150,7 +150,7 @@ static void show_segv_info(struct uml_pt_regs *regs)
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x",
+       printk("%s%s[%d]: segfault at %lx ip %px sp %px error %x",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
                (void *)UPT_IP(regs), (void *)UPT_SP(regs),
index 59b06b4..5c205a9 100644 (file)
@@ -81,9 +81,10 @@ do { \
        } \
 } while (0)
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-                                struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+                               struct mm_struct *mm)
 {
+       return 0;
 }
 
 static inline void arch_unmap(struct mm_struct *mm,
index 04d66e6..45dc623 100644 (file)
@@ -927,7 +927,8 @@ config MAXSMP
 config NR_CPUS
        int "Maximum number of CPUs" if SMP && !MAXSMP
        range 2 8 if SMP && X86_32 && !X86_BIGSMP
-       range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK
+       range 2 64 if SMP && X86_32 && X86_BIGSMP
+       range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64
        range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64
        default "1" if !SMP
        default "8192" if MAXSMP
index 4838037..ace8f32 100644 (file)
@@ -941,9 +941,10 @@ ENTRY(debug)
        movl    %esp, %eax                      # pt_regs pointer
 
        /* Are we currently on the SYSENTER stack? */
-       PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
-       subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
-       cmpl    $SIZEOF_SYSENTER_stack, %ecx
+       movl    PER_CPU_VAR(cpu_entry_area), %ecx
+       addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+       subl    %eax, %ecx      /* ecx = (end of entry_stack) - esp */
+       cmpl    $SIZEOF_entry_stack, %ecx
        jb      .Ldebug_from_sysenter_stack
 
        TRACE_IRQS_OFF
@@ -984,9 +985,10 @@ ENTRY(nmi)
        movl    %esp, %eax                      # pt_regs pointer
 
        /* Are we currently on the SYSENTER stack? */
-       PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
-       subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
-       cmpl    $SIZEOF_SYSENTER_stack, %ecx
+       movl    PER_CPU_VAR(cpu_entry_area), %ecx
+       addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+       subl    %eax, %ecx      /* ecx = (end of entry_stack) - esp */
+       cmpl    $SIZEOF_entry_stack, %ecx
        jb      .Lnmi_from_sysenter_stack
 
        /* Not on SYSENTER stack. */
index f81d50d..3d19c83 100644 (file)
@@ -140,6 +140,64 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
+       .pushsection .entry_trampoline, "ax"
+
+/*
+ * The code in here gets remapped into cpu_entry_area's trampoline.  This means
+ * that the assembler and linker have the wrong idea as to where this code
+ * lives (and, in fact, it's mapped more than once, so it's not even at a
+ * fixed address).  So we can't reference any symbols outside the entry
+ * trampoline and expect it to work.
+ *
+ * Instead, we carefully abuse %rip-relative addressing.
+ * _entry_trampoline(%rip) refers to the start of the remapped) entry
+ * trampoline.  We can thus find cpu_entry_area with this macro:
+ */
+
+#define CPU_ENTRY_AREA \
+       _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
+
+/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
+#define RSP_SCRATCH    CPU_ENTRY_AREA_entry_stack + \
+                       SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
+
+ENTRY(entry_SYSCALL_64_trampoline)
+       UNWIND_HINT_EMPTY
+       swapgs
+
+       /* Stash the user RSP. */
+       movq    %rsp, RSP_SCRATCH
+
+       /* Load the top of the task stack into RSP */
+       movq    CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
+
+       /* Start building the simulated IRET frame. */
+       pushq   $__USER_DS                      /* pt_regs->ss */
+       pushq   RSP_SCRATCH                     /* pt_regs->sp */
+       pushq   %r11                            /* pt_regs->flags */
+       pushq   $__USER_CS                      /* pt_regs->cs */
+       pushq   %rcx                            /* pt_regs->ip */
+
+       /*
+        * x86 lacks a near absolute jump, and we can't jump to the real
+        * entry text with a relative jump.  We could push the target
+        * address and then use retq, but this destroys the pipeline on
+        * many CPUs (wasting over 20 cycles on Sandy Bridge).  Instead,
+        * spill RDI and restore it in a second-stage trampoline.
+        */
+       pushq   %rdi
+       movq    $entry_SYSCALL_64_stage2, %rdi
+       jmp     *%rdi
+END(entry_SYSCALL_64_trampoline)
+
+       .popsection
+
+ENTRY(entry_SYSCALL_64_stage2)
+       UNWIND_HINT_EMPTY
+       popq    %rdi
+       jmp     entry_SYSCALL_64_after_hwframe
+END(entry_SYSCALL_64_stage2)
+
 ENTRY(entry_SYSCALL_64)
        UNWIND_HINT_EMPTY
        /*
@@ -330,8 +388,24 @@ syscall_return_via_sysret:
        popq    %rsi    /* skip rcx */
        popq    %rdx
        popq    %rsi
+
+       /*
+        * Now all regs are restored except RSP and RDI.
+        * Save old stack pointer and switch to trampoline stack.
+        */
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+
+       pushq   RSP-RDI(%rdi)   /* RSP */
+       pushq   (%rdi)          /* RDI */
+
+       /*
+        * We are on the trampoline stack.  All regs except RDI are live.
+        * We can do future final exit work right here.
+        */
+
        popq    %rdi
-       movq    RSP-ORIG_RAX(%rsp), %rsp
+       popq    %rsp
        USERGS_SYSRET64
 END(entry_SYSCALL_64)
 
@@ -466,12 +540,13 @@ END(irq_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
-       pushfq
-       testl $X86_EFLAGS_IF, (%rsp)
+       pushq %rax
+       SAVE_FLAGS(CLBR_RAX)
+       testl $X86_EFLAGS_IF, %eax
        jz .Lokay_\@
        ud2
 .Lokay_\@:
-       addq $8, %rsp
+       popq %rax
 #endif
 .endm
 
@@ -563,6 +638,13 @@ END(irq_entries_start)
 /* 0(%rsp): ~(interrupt number) */
        .macro interrupt func
        cld
+
+       testb   $3, CS-ORIG_RAX(%rsp)
+       jz      1f
+       SWAPGS
+       call    switch_to_thread_stack
+1:
+
        ALLOC_PT_GPREGS_ON_STACK
        SAVE_C_REGS
        SAVE_EXTRA_REGS
@@ -572,12 +654,8 @@ END(irq_entries_start)
        jz      1f
 
        /*
-        * IRQ from user mode.  Switch to kernel gsbase and inform context
-        * tracking that we're in kernel mode.
-        */
-       SWAPGS
-
-       /*
+        * IRQ from user mode.
+        *
         * We need to tell lockdep that IRQs are off.  We can't do this until
         * we fix gsbase, and we should do it before enter_from_user_mode
         * (which can take locks).  Since TRACE_IRQS_OFF idempotent,
@@ -630,10 +708,41 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
        ud2
 1:
 #endif
-       SWAPGS
        POP_EXTRA_REGS
-       POP_C_REGS
-       addq    $8, %rsp        /* skip regs->orig_ax */
+       popq    %r11
+       popq    %r10
+       popq    %r9
+       popq    %r8
+       popq    %rax
+       popq    %rcx
+       popq    %rdx
+       popq    %rsi
+
+       /*
+        * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
+        * Save old stack pointer and switch to trampoline stack.
+        */
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+
+       /* Copy the IRET frame to the trampoline stack. */
+       pushq   6*8(%rdi)       /* SS */
+       pushq   5*8(%rdi)       /* RSP */
+       pushq   4*8(%rdi)       /* EFLAGS */
+       pushq   3*8(%rdi)       /* CS */
+       pushq   2*8(%rdi)       /* RIP */
+
+       /* Push user RDI on the trampoline stack. */
+       pushq   (%rdi)
+
+       /*
+        * We are on the trampoline stack.  All regs except RDI are live.
+        * We can do future final exit work right here.
+        */
+
+       /* Restore RDI. */
+       popq    %rdi
+       SWAPGS
        INTERRUPT_RETURN
 
 
@@ -829,7 +938,33 @@ apicinterrupt IRQ_WORK_VECTOR                      irq_work_interrupt              smp_irq_work_interrupt
 /*
  * Exception entry points.
  */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
+
+/*
+ * Switch to the thread stack.  This is called with the IRET frame and
+ * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
+ * space has not been allocated for them.)
+ */
+ENTRY(switch_to_thread_stack)
+       UNWIND_HINT_FUNC
+
+       pushq   %rdi
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+       UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
+
+       pushq   7*8(%rdi)               /* regs->ss */
+       pushq   6*8(%rdi)               /* regs->rsp */
+       pushq   5*8(%rdi)               /* regs->eflags */
+       pushq   4*8(%rdi)               /* regs->cs */
+       pushq   3*8(%rdi)               /* regs->ip */
+       pushq   2*8(%rdi)               /* regs->orig_ax */
+       pushq   8(%rdi)                 /* return address */
+       UNWIND_HINT_FUNC
+
+       movq    (%rdi), %rdi
+       ret
+END(switch_to_thread_stack)
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
@@ -848,11 +983,12 @@ ENTRY(\sym)
 
        ALLOC_PT_GPREGS_ON_STACK
 
-       .if \paranoid
-       .if \paranoid == 1
+       .if \paranoid < 2
        testb   $3, CS(%rsp)                    /* If coming from userspace, switch stacks */
-       jnz     1f
+       jnz     .Lfrom_usermode_switch_stack_\@
        .endif
+
+       .if \paranoid
        call    paranoid_entry
        .else
        call    error_entry
@@ -894,20 +1030,15 @@ ENTRY(\sym)
        jmp     error_exit
        .endif
 
-       .if \paranoid == 1
+       .if \paranoid < 2
        /*
-        * Paranoid entry from userspace.  Switch stacks and treat it
+        * Entry from userspace.  Switch stacks and treat it
         * as a normal entry.  This means that paranoid handlers
         * run in real process context if user_mode(regs).
         */
-1:
+.Lfrom_usermode_switch_stack_\@:
        call    error_entry
 
-
-       movq    %rsp, %rdi                      /* pt_regs pointer */
-       call    sync_regs
-       movq    %rax, %rsp                      /* switch stack */
-
        movq    %rsp, %rdi                      /* pt_regs pointer */
 
        .if \has_error_code
@@ -1170,6 +1301,14 @@ ENTRY(error_entry)
        SWAPGS
 
 .Lerror_entry_from_usermode_after_swapgs:
+       /* Put us onto the real thread stack. */
+       popq    %r12                            /* save return addr in %12 */
+       movq    %rsp, %rdi                      /* arg0 = pt_regs pointer */
+       call    sync_regs
+       movq    %rax, %rsp                      /* switch stack */
+       ENCODE_FRAME_POINTER
+       pushq   %r12
+
        /*
         * We need to tell lockdep that IRQs are off.  We can't do this until
         * we fix gsbase, and we should do it before enter_from_user_mode
index 568e130..95ad40e 100644 (file)
@@ -48,7 +48,7 @@
  */
 ENTRY(entry_SYSENTER_compat)
        /* Interrupts are off on entry. */
-       SWAPGS_UNSAFE_STACK
+       SWAPGS
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        /*
@@ -306,8 +306,11 @@ ENTRY(entry_INT80_compat)
         */
        movl    %eax, %eax
 
-       /* Construct struct pt_regs on stack (iret frame is already on stack) */
        pushq   %rax                    /* pt_regs->orig_ax */
+
+       /* switch to thread stack expects orig_ax to be pushed */
+       call    switch_to_thread_stack
+
        pushq   %rdi                    /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
        pushq   %rdx                    /* pt_regs->dx */
index f279ba2..1faf40f 100644 (file)
@@ -37,6 +37,7 @@
 #include <asm/unistd.h>
 #include <asm/fixmap.h>
 #include <asm/traps.h>
+#include <asm/paravirt.h>
 
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
@@ -138,6 +139,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        WARN_ON_ONCE(address != regs->ip);
 
+       /* This should be unreachable in NATIVE mode. */
+       if (WARN_ON(vsyscall_mode == NATIVE))
+               return false;
+
        if (vsyscall_mode == NONE) {
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall attempted with vsyscall=none");
@@ -329,16 +334,47 @@ int in_gate_area_no_mm(unsigned long addr)
        return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
 }
 
+/*
+ * The VSYSCALL page is the only user-accessible page in the kernel address
+ * range.  Normally, the kernel page tables can have _PAGE_USER clear, but
+ * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
+ * are enabled.
+ *
+ * Some day we may create a "minimal" vsyscall mode in which we emulate
+ * vsyscalls but leave the page not present.  If so, we skip calling
+ * this.
+ */
+static void __init set_vsyscall_pgtable_user_bits(void)
+{
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       pgd = pgd_offset_k(VSYSCALL_ADDR);
+       set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
+       p4d = p4d_offset(pgd, VSYSCALL_ADDR);
+#if CONFIG_PGTABLE_LEVELS >= 5
+       p4d->p4d |= _PAGE_USER;
+#endif
+       pud = pud_offset(p4d, VSYSCALL_ADDR);
+       set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
+       pmd = pmd_offset(pud, VSYSCALL_ADDR);
+       set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
+}
+
 void __init map_vsyscall(void)
 {
        extern char __vsyscall_page;
        unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
 
-       if (vsyscall_mode != NONE)
+       if (vsyscall_mode != NONE) {
                __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
                             vsyscall_mode == NATIVE
                             ? PAGE_KERNEL_VSYSCALL
                             : PAGE_KERNEL_VVAR);
+               set_vsyscall_pgtable_user_bits();
+       }
 
        BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
                     (unsigned long)VSYSCALL_ADDR);
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
new file mode 100644 (file)
index 0000000..2fbc69a
--- /dev/null
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _ASM_X86_CPU_ENTRY_AREA_H
+#define _ASM_X86_CPU_ENTRY_AREA_H
+
+#include <linux/percpu-defs.h>
+#include <asm/processor.h>
+
+/*
+ * cpu_entry_area is a percpu region that contains things needed by the CPU
+ * and early entry/exit code.  Real types aren't used for all fields here
+ * to avoid circular header dependencies.
+ *
+ * Every field is a virtual alias of some other allocated backing store.
+ * There is no direct allocation of a struct cpu_entry_area.
+ */
+struct cpu_entry_area {
+       char gdt[PAGE_SIZE];
+
+       /*
+        * The GDT is just below entry_stack and thus serves (on x86_64) as
+        * a a read-only guard page.
+        */
+       struct entry_stack_page entry_stack_page;
+
+       /*
+        * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
+        * we need task switches to work, and task switches write to the TSS.
+        */
+       struct tss_struct tss;
+
+       char entry_trampoline[PAGE_SIZE];
+
+#ifdef CONFIG_X86_64
+       /*
+        * Exception stacks used for IST entries.
+        *
+        * In the future, this should have a separate slot for each stack
+        * with guard pages between them.
+        */
+       char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+#endif
+};
+
+#define CPU_ENTRY_AREA_SIZE    (sizeof(struct cpu_entry_area))
+#define CPU_ENTRY_AREA_TOT_SIZE        (CPU_ENTRY_AREA_SIZE * NR_CPUS)
+
+DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+
+extern void setup_cpu_entry_areas(void);
+extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
+
+#define        CPU_ENTRY_AREA_RO_IDT           CPU_ENTRY_AREA_BASE
+#define CPU_ENTRY_AREA_PER_CPU         (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
+
+#define CPU_ENTRY_AREA_RO_IDT_VADDR    ((void *)CPU_ENTRY_AREA_RO_IDT)
+
+#define CPU_ENTRY_AREA_MAP_SIZE                        \
+       (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
+
+extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
+
+static inline struct entry_stack *cpu_entry_stack(int cpu)
+{
+       return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
+}
+
+#endif
index bf6a762..ea9a7dd 100644 (file)
@@ -135,6 +135,8 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
        set_bit(bit, (unsigned long *)cpu_caps_set);    \
 } while (0)
 
+#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
index 4011cb0..ec8be07 100644 (file)
@@ -7,6 +7,7 @@
 #include <asm/mmu.h>
 #include <asm/fixmap.h>
 #include <asm/irq_vectors.h>
+#include <asm/cpu_entry_area.h>
 
 #include <linux/smp.h>
 #include <linux/percpu.h>
@@ -60,17 +61,10 @@ static inline struct desc_struct *get_current_gdt_rw(void)
        return this_cpu_ptr(&gdt_page)->gdt;
 }
 
-/* Get the fixmap index for a specific processor */
-static inline unsigned int get_cpu_gdt_ro_index(int cpu)
-{
-       return FIX_GDT_REMAP_BEGIN + cpu;
-}
-
 /* Provide the fixmap address of the remapped GDT */
 static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
 {
-       unsigned int idx = get_cpu_gdt_ro_index(cpu);
-       return (struct desc_struct *)__fix_to_virt(idx);
+       return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt;
 }
 
 /* Provide the current read-only GDT */
@@ -185,7 +179,7 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr,
 #endif
 }
 
-static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
+static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr)
 {
        struct desc_struct *d = get_cpu_gdt_rw(cpu);
        tss_desc tss;
index 0211029..6777480 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef _ASM_X86_ESPFIX_H
 #define _ASM_X86_ESPFIX_H
 
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_ESPFIX64
 
 #include <asm/percpu.h>
 
@@ -11,7 +11,8 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
 
 extern void init_espfix_bsp(void);
 extern void init_espfix_ap(int cpu);
-
-#endif /* CONFIG_X86_64 */
+#else
+static inline void init_espfix_ap(int cpu) { }
+#endif
 
 #endif /* _ASM_X86_ESPFIX_H */
index b0c505f..64c4a30 100644 (file)
@@ -44,7 +44,6 @@ extern unsigned long __FIXADDR_TOP;
                         PAGE_SIZE)
 #endif
 
-
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
@@ -84,7 +83,6 @@ enum fixed_addresses {
        FIX_IO_APIC_BASE_0,
        FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
 #endif
-       FIX_RO_IDT,     /* Virtual mapping for read-only IDT */
 #ifdef CONFIG_X86_32
        FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
        FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
@@ -100,9 +98,6 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_INTEL_MID
        FIX_LNW_VRTC,
 #endif
-       /* Fixmap entries to remap the GDTs, one per processor. */
-       FIX_GDT_REMAP_BEGIN,
-       FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1,
 
 #ifdef CONFIG_ACPI_APEI_GHES
        /* Used for GHES mapping from assorted contexts */
@@ -143,7 +138,7 @@ enum fixed_addresses {
 extern void reserve_top_address(unsigned long reserve);
 
 #define FIXADDR_SIZE   (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START          (FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_START  (FIXADDR_TOP - FIXADDR_SIZE)
 
 extern int fixmaps_set;
 
index 1b0a5ab..96aa6b9 100644 (file)
 #ifndef _ASM_X86_HYPERVISOR_H
 #define _ASM_X86_HYPERVISOR_H
 
-#ifdef CONFIG_HYPERVISOR_GUEST
-
-#include <asm/kvm_para.h>
-#include <asm/x86_init.h>
-#include <asm/xen/hypervisor.h>
-
-/*
- * x86 hypervisor information
- */
-
+/* x86 hypervisor types  */
 enum x86_hypervisor_type {
        X86_HYPER_NATIVE = 0,
        X86_HYPER_VMWARE,
@@ -39,6 +30,12 @@ enum x86_hypervisor_type {
        X86_HYPER_KVM,
 };
 
+#ifdef CONFIG_HYPERVISOR_GUEST
+
+#include <asm/kvm_para.h>
+#include <asm/x86_init.h>
+#include <asm/xen/hypervisor.h>
+
 struct hypervisor_x86 {
        /* Hypervisor name */
        const char      *name;
@@ -58,7 +55,15 @@ struct hypervisor_x86 {
 
 extern enum x86_hypervisor_type x86_hyper_type;
 extern void init_hypervisor_platform(void);
+static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
+{
+       return x86_hyper_type == type;
+}
 #else
 static inline void init_hypervisor_platform(void) { }
+static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
+{
+       return type == X86_HYPER_NATIVE;
+}
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */
diff --git a/arch/x86/include/asm/invpcid.h b/arch/x86/include/asm/invpcid.h
new file mode 100644 (file)
index 0000000..989cfa8
--- /dev/null
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_INVPCID
+#define _ASM_X86_INVPCID
+
+static inline void __invpcid(unsigned long pcid, unsigned long addr,
+                            unsigned long type)
+{
+       struct { u64 d[2]; } desc = { { pcid, addr } };
+
+       /*
+        * The memory clobber is because the whole point is to invalidate
+        * stale TLB entries and, especially if we're flushing global
+        * mappings, we don't want the compiler to reorder any subsequent
+        * memory accesses before the TLB flush.
+        *
+        * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
+        * invpcid (%rcx), %rax in long mode.
+        */
+       asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
+                     : : "m" (desc), "a" (type), "c" (&desc) : "memory");
+}
+
+#define INVPCID_TYPE_INDIV_ADDR                0
+#define INVPCID_TYPE_SINGLE_CTXT       1
+#define INVPCID_TYPE_ALL_INCL_GLOBAL   2
+#define INVPCID_TYPE_ALL_NON_GLOBAL    3
+
+/* Flush all mappings for a given pcid and addr, not including globals. */
+static inline void invpcid_flush_one(unsigned long pcid,
+                                    unsigned long addr)
+{
+       __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
+}
+
+/* Flush all mappings for a given PCID, not including globals. */
+static inline void invpcid_flush_single_context(unsigned long pcid)
+{
+       __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
+}
+
+/* Flush all mappings, including globals, for all PCIDs. */
+static inline void invpcid_flush_all(void)
+{
+       __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
+}
+
+/* Flush all mappings for all PCIDs except globals. */
+static inline void invpcid_flush_all_nonglobals(void)
+{
+       __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
+}
+
+#endif /* _ASM_X86_INVPCID */
index c8ef23f..89f0895 100644 (file)
@@ -142,6 +142,9 @@ static inline notrace unsigned long arch_local_irq_save(void)
        swapgs;                                 \
        sysretl
 
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(x)          pushfq; popq %rax
+#endif
 #else
 #define INTERRUPT_RETURN               iret
 #define ENABLE_INTERRUPTS_SYSEXIT      sti; sysexit
index f86a8ca..395c963 100644 (file)
@@ -26,6 +26,7 @@ extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_stack_regs(struct pt_regs *regs);
 extern void __show_regs(struct pt_regs *regs, int all);
+extern void show_iret_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
 
index 9ea26f1..5ff3e8a 100644 (file)
@@ -3,6 +3,7 @@
 #define _ASM_X86_MMU_H
 
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/atomic.h>
 
@@ -27,7 +28,8 @@ typedef struct {
        atomic64_t tlb_gen;
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
-       struct ldt_struct *ldt;
+       struct rw_semaphore     ldt_usr_sem;
+       struct ldt_struct       *ldt;
 #endif
 
 #ifdef CONFIG_X86_64
index 6d16d15..5ede7ca 100644 (file)
@@ -57,11 +57,17 @@ struct ldt_struct {
 /*
  * Used for LDT copy/destruction.
  */
-int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
+static inline void init_new_context_ldt(struct mm_struct *mm)
+{
+       mm->context.ldt = NULL;
+       init_rwsem(&mm->context.ldt_usr_sem);
+}
+int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
 void destroy_context_ldt(struct mm_struct *mm);
 #else  /* CONFIG_MODIFY_LDT_SYSCALL */
-static inline int init_new_context_ldt(struct task_struct *tsk,
-                                      struct mm_struct *mm)
+static inline void init_new_context_ldt(struct mm_struct *mm) { }
+static inline int ldt_dup_context(struct mm_struct *oldmm,
+                                 struct mm_struct *mm)
 {
        return 0;
 }
@@ -132,18 +138,21 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
 {
+       mutex_init(&mm->context.lock);
+
        mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
        atomic64_set(&mm->context.tlb_gen, 0);
 
-       #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                /* pkey 0 is the default and always allocated */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
        }
-       #endif
-       return init_new_context_ldt(tsk, mm);
+#endif
+       init_new_context_ldt(mm);
+       return 0;
 }
 static inline void destroy_context(struct mm_struct *mm)
 {
@@ -176,10 +185,10 @@ do {                                              \
 } while (0)
 #endif
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-                                struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
        paravirt_arch_dup_mmap(oldmm, mm);
+       return ldt_dup_context(oldmm, mm);
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)
@@ -282,33 +291,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 }
 
 /*
- * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
- * bits.  This serves two purposes.  It prevents a nasty situation in
- * which PCID-unaware code saves CR3, loads some other value (with PCID
- * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
- * the saved ASID was nonzero.  It also means that any bugs involving
- * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
- * deterministically.
- */
-
-static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
-{
-       if (static_cpu_has(X86_FEATURE_PCID)) {
-               VM_WARN_ON_ONCE(asid > 4094);
-               return __sme_pa(mm->pgd) | (asid + 1);
-       } else {
-               VM_WARN_ON_ONCE(asid != 0);
-               return __sme_pa(mm->pgd);
-       }
-}
-
-static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
-{
-       VM_WARN_ON_ONCE(asid > 4094);
-       return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
-}
-
-/*
  * This can be used from process context to figure out what the value of
  * CR3 is without needing to do a (slow) __read_cr3().
  *
@@ -317,7 +299,7 @@ static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
  */
 static inline unsigned long __get_current_cr3_fast(void)
 {
-       unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
+       unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
                this_cpu_read(cpu_tlbstate.loaded_mm_asid));
 
        /* For now, be very restrictive about when this can be called. */
index 283efca..892df37 100644 (file)
@@ -927,6 +927,15 @@ extern void default_banner(void);
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(clobbers)                                        \
+       PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
+                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
+                 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl);    \
+                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+#endif
+
 #endif /* CONFIG_X86_32 */
 
 #endif /* __ASSEMBLY__ */
index f2ca9b2..ce245b0 100644 (file)
@@ -38,13 +38,22 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
 #define LAST_PKMAP 1024
 #endif
 
-#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1))     \
-                   & PMD_MASK)
+/*
+ * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
+ * to avoid include recursion hell
+ */
+#define CPU_ENTRY_AREA_PAGES   (NR_CPUS * 40)
+
+#define CPU_ENTRY_AREA_BASE                            \
+       ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK)
+
+#define PKMAP_BASE             \
+       ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
 
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END   (PKMAP_BASE - 2 * PAGE_SIZE)
 #else
-# define VMALLOC_END   (FIXADDR_START - 2 * PAGE_SIZE)
+# define VMALLOC_END   (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
 #endif
 
 #define MODULES_VADDR  VMALLOC_START
index 6d5f45d..3d27831 100644 (file)
@@ -76,32 +76,41 @@ typedef struct { pteval_t pte; } pte_t;
 #define PGDIR_MASK     (~(PGDIR_SIZE - 1))
 
 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
-#define MAXMEM         _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+#define MAXMEM                 _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+
 #ifdef CONFIG_X86_5LEVEL
-#define VMALLOC_SIZE_TB _AC(16384, UL)
-#define __VMALLOC_BASE _AC(0xff92000000000000, UL)
-#define __VMEMMAP_BASE _AC(0xffd4000000000000, UL)
+# define VMALLOC_SIZE_TB       _AC(16384, UL)
+# define __VMALLOC_BASE                _AC(0xff92000000000000, UL)
+# define __VMEMMAP_BASE                _AC(0xffd4000000000000, UL)
 #else
-#define VMALLOC_SIZE_TB        _AC(32, UL)
-#define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
-#define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
+# define VMALLOC_SIZE_TB       _AC(32, UL)
+# define __VMALLOC_BASE                _AC(0xffffc90000000000, UL)
+# define __VMEMMAP_BASE                _AC(0xffffea0000000000, UL)
 #endif
+
 #ifdef CONFIG_RANDOMIZE_MEMORY
-#define VMALLOC_START  vmalloc_base
-#define VMEMMAP_START  vmemmap_base
+# define VMALLOC_START         vmalloc_base
+# define VMEMMAP_START         vmemmap_base
 #else
-#define VMALLOC_START  __VMALLOC_BASE
-#define VMEMMAP_START  __VMEMMAP_BASE
+# define VMALLOC_START         __VMALLOC_BASE
+# define VMEMMAP_START         __VMEMMAP_BASE
 #endif /* CONFIG_RANDOMIZE_MEMORY */
-#define VMALLOC_END    (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
-#define MODULES_VADDR    (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
+
+#define VMALLOC_END            (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
+
+#define MODULES_VADDR          (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
 /* The module sections ends with the start of the fixmap */
-#define MODULES_END   __fix_to_virt(__end_of_fixed_addresses + 1)
-#define MODULES_LEN   (MODULES_END - MODULES_VADDR)
-#define ESPFIX_PGD_ENTRY _AC(-2, UL)
-#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)
-#define EFI_VA_START    ( -4 * (_AC(1, UL) << 30))
-#define EFI_VA_END      (-68 * (_AC(1, UL) << 30))
+#define MODULES_END            __fix_to_virt(__end_of_fixed_addresses + 1)
+#define MODULES_LEN            (MODULES_END - MODULES_VADDR)
+
+#define ESPFIX_PGD_ENTRY       _AC(-2, UL)
+#define ESPFIX_BASE_ADDR       (ESPFIX_PGD_ENTRY << P4D_SHIFT)
+
+#define CPU_ENTRY_AREA_PGD     _AC(-3, UL)
+#define CPU_ENTRY_AREA_BASE    (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
+
+#define EFI_VA_START           ( -4 * (_AC(1, UL) << 30))
+#define EFI_VA_END             (-68 * (_AC(1, UL) << 30))
 
 #define EARLY_DYNAMIC_PAGE_TABLES      64
 
index cc16fa8..cad8dab 100644 (file)
@@ -163,9 +163,9 @@ enum cpuid_regs_idx {
 extern struct cpuinfo_x86      boot_cpu_data;
 extern struct cpuinfo_x86      new_cpu_data;
 
-extern struct tss_struct       doublefault_tss;
-extern __u32                   cpu_caps_cleared[NCAPINTS];
-extern __u32                   cpu_caps_set[NCAPINTS];
+extern struct x86_hw_tss       doublefault_tss;
+extern __u32                   cpu_caps_cleared[NCAPINTS + NBUGINTS];
+extern __u32                   cpu_caps_set[NCAPINTS + NBUGINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
@@ -253,6 +253,11 @@ static inline void load_cr3(pgd_t *pgdir)
        write_cr3(__sme_pa(pgdir));
 }
 
+/*
+ * Note that while the legacy 'TSS' name comes from 'Task State Segment',
+ * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
+ * unrelated to the task-switch mechanism:
+ */
 #ifdef CONFIG_X86_32
 /* This is the TSS defined by the hardware. */
 struct x86_hw_tss {
@@ -305,7 +310,13 @@ struct x86_hw_tss {
 struct x86_hw_tss {
        u32                     reserved1;
        u64                     sp0;
+
+       /*
+        * We store cpu_current_top_of_stack in sp1 so it's always accessible.
+        * Linux does not use ring 1, so sp1 is not otherwise needed.
+        */
        u64                     sp1;
+
        u64                     sp2;
        u64                     reserved2;
        u64                     ist[7];
@@ -323,12 +334,22 @@ struct x86_hw_tss {
 #define IO_BITMAP_BITS                 65536
 #define IO_BITMAP_BYTES                        (IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS                        (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET               offsetof(struct tss_struct, io_bitmap)
+#define IO_BITMAP_OFFSET               (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
 #define INVALID_IO_BITMAP_OFFSET       0x8000
 
+struct entry_stack {
+       unsigned long           words[64];
+};
+
+struct entry_stack_page {
+       struct entry_stack stack;
+} __aligned(PAGE_SIZE);
+
 struct tss_struct {
        /*
-        * The hardware state:
+        * The fixed hardware portion.  This must not cross a page boundary
+        * at risk of violating the SDM's advice and potentially triggering
+        * errata.
         */
        struct x86_hw_tss       x86_tss;
 
@@ -339,18 +360,9 @@ struct tss_struct {
         * be within the limit.
         */
        unsigned long           io_bitmap[IO_BITMAP_LONGS + 1];
+} __aligned(PAGE_SIZE);
 
-#ifdef CONFIG_X86_32
-       /*
-        * Space for the temporary SYSENTER stack.
-        */
-       unsigned long           SYSENTER_stack_canary;
-       unsigned long           SYSENTER_stack[64];
-#endif
-
-} ____cacheline_aligned;
-
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
 
 /*
  * sizeof(unsigned long) coming from an extra "long" at the end
@@ -364,6 +376,9 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
 
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+#else
+/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
+#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
 #endif
 
 /*
@@ -523,7 +538,7 @@ static inline void native_set_iopl_mask(unsigned mask)
 static inline void
 native_load_sp0(unsigned long sp0)
 {
-       this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+       this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 static inline void native_swapgs(void)
@@ -535,12 +550,12 @@ static inline void native_swapgs(void)
 
 static inline unsigned long current_top_of_stack(void)
 {
-#ifdef CONFIG_X86_64
-       return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
-#else
-       /* sp0 on x86_32 is special in and around vm86 mode. */
+       /*
+        *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
+        *  and around vm86 mode and sp0 on x86_64 is special because of the
+        *  entry trampoline.
+        */
        return this_cpu_read_stable(cpu_current_top_of_stack);
-#endif
 }
 
 static inline bool on_thread_stack(void)
index 8da111b..f737068 100644 (file)
@@ -16,6 +16,7 @@ enum stack_type {
        STACK_TYPE_TASK,
        STACK_TYPE_IRQ,
        STACK_TYPE_SOFTIRQ,
+       STACK_TYPE_ENTRY,
        STACK_TYPE_EXCEPTION,
        STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1,
 };
@@ -28,6 +29,8 @@ struct stack_info {
 bool in_task_stack(unsigned long *stack, struct task_struct *task,
                   struct stack_info *info);
 
+bool in_entry_stack(unsigned long *stack, struct stack_info *info);
+
 int get_stack_info(unsigned long *stack, struct task_struct *task,
                   struct stack_info *info, unsigned long *visit_mask);
 
index 8c6bd68..9b6df68 100644 (file)
@@ -79,10 +79,10 @@ do {                                                                        \
 static inline void refresh_sysenter_cs(struct thread_struct *thread)
 {
        /* Only happens when SEP is enabled, no need to test "SEP"arately: */
-       if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
+       if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
                return;
 
-       this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
+       this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
        wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 }
 #endif
@@ -90,10 +90,12 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
 /* This is used when switching tasks or entering/exiting vm86 mode. */
 static inline void update_sp0(struct task_struct *task)
 {
+       /* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
 #ifdef CONFIG_X86_32
        load_sp0(task->thread.sp0);
 #else
-       load_sp0(task_top_of_stack(task));
+       if (static_cpu_has(X86_FEATURE_XENPV))
+               load_sp0(task_top_of_stack(task));
 #endif
 }
 
index 70f4259..0022333 100644 (file)
@@ -207,7 +207,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
+# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
 #endif
 
 #endif
index 877b5c1..e1884cf 100644 (file)
@@ -9,70 +9,66 @@
 #include <asm/cpufeature.h>
 #include <asm/special_insns.h>
 #include <asm/smp.h>
+#include <asm/invpcid.h>
 
-static inline void __invpcid(unsigned long pcid, unsigned long addr,
-                            unsigned long type)
+static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 {
-       struct { u64 d[2]; } desc = { { pcid, addr } };
-
        /*
-        * The memory clobber is because the whole point is to invalidate
-        * stale TLB entries and, especially if we're flushing global
-        * mappings, we don't want the compiler to reorder any subsequent
-        * memory accesses before the TLB flush.
-        *
-        * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
-        * invpcid (%rcx), %rax in long mode.
+        * Bump the generation count.  This also serves as a full barrier
+        * that synchronizes with switch_mm(): callers are required to order
+        * their read of mm_cpumask after their writes to the paging
+        * structures.
         */
-       asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
-                     : : "m" (desc), "a" (type), "c" (&desc) : "memory");
+       return atomic64_inc_return(&mm->context.tlb_gen);
 }
 
-#define INVPCID_TYPE_INDIV_ADDR                0
-#define INVPCID_TYPE_SINGLE_CTXT       1
-#define INVPCID_TYPE_ALL_INCL_GLOBAL   2
-#define INVPCID_TYPE_ALL_NON_GLOBAL    3
+/* There are 12 bits of space for ASIDS in CR3 */
+#define CR3_HW_ASID_BITS               12
+/*
+ * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
+ * user/kernel switches
+ */
+#define PTI_CONSUMED_ASID_BITS         0
 
-/* Flush all mappings for a given pcid and addr, not including globals. */
-static inline void invpcid_flush_one(unsigned long pcid,
-                                    unsigned long addr)
-{
-       __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
-}
+#define CR3_AVAIL_ASID_BITS (CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS)
+/*
+ * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid.  -1 below to account
+ * for them being zero-based.  Another -1 is because ASID 0 is reserved for
+ * use by non-PCID-aware users.
+ */
+#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2)
 
-/* Flush all mappings for a given PCID, not including globals. */
-static inline void invpcid_flush_single_context(unsigned long pcid)
+static inline u16 kern_pcid(u16 asid)
 {
-       __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
+       VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+       /*
+        * If PCID is on, ASID-aware code paths put the ASID+1 into the
+        * PCID bits.  This serves two purposes.  It prevents a nasty
+        * situation in which PCID-unaware code saves CR3, loads some other
+        * value (with PCID == 0), and then restores CR3, thus corrupting
+        * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
+        * that any bugs involving loading a PCID-enabled CR3 with
+        * CR4.PCIDE off will trigger deterministically.
+        */
+       return asid + 1;
 }
 
-/* Flush all mappings, including globals, for all PCIDs. */
-static inline void invpcid_flush_all(void)
+struct pgd_t;
+static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
 {
-       __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
+       if (static_cpu_has(X86_FEATURE_PCID)) {
+               return __sme_pa(pgd) | kern_pcid(asid);
+       } else {
+               VM_WARN_ON_ONCE(asid != 0);
+               return __sme_pa(pgd);
+       }
 }
 
-/* Flush all mappings for all PCIDs except globals. */
-static inline void invpcid_flush_all_nonglobals(void)
+static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 {
-       __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
-}
-
-static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
-{
-       u64 new_tlb_gen;
-
-       /*
-        * Bump the generation count.  This also serves as a full barrier
-        * that synchronizes with switch_mm(): callers are required to order
-        * their read of mm_cpumask after their writes to the paging
-        * structures.
-        */
-       smp_mb__before_atomic();
-       new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
-       smp_mb__after_atomic();
-
-       return new_tlb_gen;
+       VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+       VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
+       return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
 }
 
 #ifdef CONFIG_PARAVIRT
@@ -237,6 +233,9 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
 
 extern void initialize_tlbstate_and_flush(void);
 
+/*
+ * flush the entire current user mapping
+ */
 static inline void __native_flush_tlb(void)
 {
        /*
@@ -249,20 +248,12 @@ static inline void __native_flush_tlb(void)
        preempt_enable();
 }
 
-static inline void __native_flush_tlb_global_irq_disabled(void)
-{
-       unsigned long cr4;
-
-       cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       /* clear PGE */
-       native_write_cr4(cr4 & ~X86_CR4_PGE);
-       /* write old PGE again and flush TLBs */
-       native_write_cr4(cr4);
-}
-
+/*
+ * flush everything
+ */
 static inline void __native_flush_tlb_global(void)
 {
-       unsigned long flags;
+       unsigned long cr4, flags;
 
        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
@@ -280,22 +271,36 @@ static inline void __native_flush_tlb_global(void)
         */
        raw_local_irq_save(flags);
 
-       __native_flush_tlb_global_irq_disabled();
+       cr4 = this_cpu_read(cpu_tlbstate.cr4);
+       /* toggle PGE */
+       native_write_cr4(cr4 ^ X86_CR4_PGE);
+       /* write old PGE again and flush TLBs */
+       native_write_cr4(cr4);
 
        raw_local_irq_restore(flags);
 }
 
+/*
+ * flush one page in the user mapping
+ */
 static inline void __native_flush_tlb_single(unsigned long addr)
 {
        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
 }
 
+/*
+ * flush everything
+ */
 static inline void __flush_tlb_all(void)
 {
-       if (boot_cpu_has(X86_FEATURE_PGE))
+       if (boot_cpu_has(X86_FEATURE_PGE)) {
                __flush_tlb_global();
-       else
+       } else {
+               /*
+                * !PGE -> !PCID (setup_pcid()), thus every flush is total.
+                */
                __flush_tlb();
+       }
 
        /*
         * Note: if we somehow had PCID but not PGE, then this wouldn't work --
@@ -306,6 +311,9 @@ static inline void __flush_tlb_all(void)
         */
 }
 
+/*
+ * flush one page in the kernel mapping
+ */
 static inline void __flush_tlb_one(unsigned long addr)
 {
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
index 1fadd31..31051f3 100644 (file)
@@ -75,7 +75,6 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long);
 dotraplinkage void do_stack_segment(struct pt_regs *, long);
 #ifdef CONFIG_X86_64
 dotraplinkage void do_double_fault(struct pt_regs *, long);
-asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
 #endif
 dotraplinkage void do_general_protection(struct pt_regs *, long);
 dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
index e9cc6fe..c1688c2 100644 (file)
@@ -7,6 +7,9 @@
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
+#define IRET_FRAME_OFFSET (offsetof(struct pt_regs, ip))
+#define IRET_FRAME_SIZE   (sizeof(struct pt_regs) - IRET_FRAME_OFFSET)
+
 struct unwind_state {
        struct stack_info stack_info;
        unsigned long stack_mask;
@@ -52,6 +55,10 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
 }
 
 #if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
+/*
+ * WARNING: The entire pt_regs may not be safe to dereference.  In some cases,
+ * only the iret frame registers are accessible.  Use with caution!
+ */
 static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
 {
        if (unwind_done(state))
index 8ea7827..676b7cf 100644 (file)
@@ -93,4 +93,10 @@ void common(void) {
 
        BLANK();
        DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+
+       /* Layout info for cpu_entry_area */
+       OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
+       OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
+       OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
+       DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
 }
index dedf428..fa1261e 100644 (file)
@@ -47,13 +47,8 @@ void foo(void)
        BLANK();
 
        /* Offset from the sysenter stack to tss.sp0 */
-       DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
-              offsetofend(struct tss_struct, SYSENTER_stack));
-
-       /* Offset from cpu_tss to SYSENTER_stack */
-       OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack);
-       /* Size of SYSENTER_stack */
-       DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack));
+       DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+              offsetofend(struct cpu_entry_area, entry_stack_page.stack));
 
 #ifdef CONFIG_CC_STACKPROTECTOR
        BLANK();
index 630212f..bf51e51 100644 (file)
@@ -23,6 +23,9 @@ int main(void)
 #ifdef CONFIG_PARAVIRT
        OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
        OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
+#ifdef CONFIG_DEBUG_ENTRY
+       OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl);
+#endif
        BLANK();
 #endif
 
@@ -63,6 +66,7 @@ int main(void)
 
        OFFSET(TSS_ist, tss_struct, x86_tss.ist);
        OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+       OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
        BLANK();
 
 #ifdef CONFIG_CC_STACKPROTECTOR
index fa998ca..c9757f0 100644 (file)
@@ -476,8 +476,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
        return NULL;            /* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS];
-__u32 cpu_caps_set[NCAPINTS];
+__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
+__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
 
 void load_percpu_segment(int cpu)
 {
@@ -490,28 +490,23 @@ void load_percpu_segment(int cpu)
        load_stack_canary_segment();
 }
 
-/* Setup the fixmap mapping only once per-processor */
-static inline void setup_fixmap_gdt(int cpu)
-{
-#ifdef CONFIG_X86_64
-       /* On 64-bit systems, we use a read-only fixmap GDT. */
-       pgprot_t prot = PAGE_KERNEL_RO;
-#else
-       /*
-        * On native 32-bit systems, the GDT cannot be read-only because
-        * our double fault handler uses a task gate, and entering through
-        * a task gate needs to change an available TSS to busy.  If the GDT
-        * is read-only, that will triple fault.
-        *
-        * On Xen PV, the GDT must be read-only because the hypervisor requires
-        * it.
-        */
-       pgprot_t prot = boot_cpu_has(X86_FEATURE_XENPV) ?
-               PAGE_KERNEL_RO : PAGE_KERNEL;
+#ifdef CONFIG_X86_32
+/* The 32-bit entry code needs to find cpu_entry_area. */
+DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 #endif
 
-       __set_fixmap(get_cpu_gdt_ro_index(cpu), get_cpu_gdt_paddr(cpu), prot);
-}
+#ifdef CONFIG_X86_64
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+         [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
+         [DEBUG_STACK - 1]                     = DEBUG_STKSZ
+};
+#endif
 
 /* Load the original GDT from the per-cpu structure */
 void load_direct_gdt(int cpu)
@@ -747,7 +742,7 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
 {
        int i;
 
-       for (i = 0; i < NCAPINTS; i++) {
+       for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }
@@ -1250,7 +1245,7 @@ void enable_sep_cpu(void)
                return;
 
        cpu = get_cpu();
-       tss = &per_cpu(cpu_tss, cpu);
+       tss = &per_cpu(cpu_tss_rw, cpu);
 
        /*
         * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
@@ -1259,11 +1254,7 @@ void enable_sep_cpu(void)
 
        tss->x86_tss.ss1 = __KERNEL_CS;
        wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
-
-       wrmsr(MSR_IA32_SYSENTER_ESP,
-             (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
-             0);
-
+       wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
 
        put_cpu();
@@ -1357,25 +1348,19 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
-         [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
-         [DEBUG_STACK - 1]                     = DEBUG_STKSZ
-};
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
-       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
+       extern char _entry_trampoline[];
+       extern char entry_SYSCALL_64_trampoline[];
+
+       int cpu = smp_processor_id();
+       unsigned long SYSCALL64_entry_trampoline =
+               (unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
+               (entry_SYSCALL_64_trampoline - _entry_trampoline);
+
        wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
-       wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
+       wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
 
 #ifdef CONFIG_IA32_EMULATION
        wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
@@ -1386,7 +1371,7 @@ void syscall_init(void)
         * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
         */
        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
        wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
        wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
@@ -1530,7 +1515,7 @@ void cpu_init(void)
        if (cpu)
                load_ucode_ap();
 
-       t = &per_cpu(cpu_tss, cpu);
+       t = &per_cpu(cpu_tss_rw, cpu);
        oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
@@ -1569,7 +1554,7 @@ void cpu_init(void)
         * set up and load the per-CPU TSS
         */
        if (!oist->ist[0]) {
-               char *estacks = per_cpu(exception_stacks, cpu);
+               char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
 
                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        estacks += exception_stack_sizes[v];
@@ -1580,7 +1565,7 @@ void cpu_init(void)
                }
        }
 
-       t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+       t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
        /*
         * <= is required because the CPU will access up to
@@ -1596,11 +1581,12 @@ void cpu_init(void)
        enter_lazy_tlb(&init_mm, me);
 
        /*
-        * Initialize the TSS.  Don't bother initializing sp0, as the initial
-        * task never enters user mode.
+        * Initialize the TSS.  sp0 points to the entry trampoline stack
+        * regardless of what task is running.
         */
-       set_tss_desc(cpu, t);
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
+       load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
 
        load_mm_ldt(&init_mm);
 
@@ -1612,7 +1598,6 @@ void cpu_init(void)
        if (is_uv_system())
                uv_cpu_init();
 
-       setup_fixmap_gdt(cpu);
        load_fixmap_gdt(cpu);
 }
 
@@ -1622,7 +1607,7 @@ void cpu_init(void)
 {
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
-       struct tss_struct *t = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
 
        wait_for_master_cpu(cpu);
 
@@ -1657,12 +1642,12 @@ void cpu_init(void)
         * Initialize the TSS.  Don't bother initializing sp0, as the initial
         * task never enters user mode.
         */
-       set_tss_desc(cpu, t);
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
        load_TR_desc();
 
        load_mm_ldt(&init_mm);
 
-       t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+       t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
 #ifdef CONFIG_DOUBLEFAULT
        /* Set up doublefault TSS pointer in the GDT */
@@ -1674,7 +1659,6 @@ void cpu_init(void)
 
        fpu__init_cpu();
 
-       setup_fixmap_gdt(cpu);
        load_fixmap_gdt(cpu);
 }
 #endif
index 7dbcb7a..8ccdca6 100644 (file)
@@ -565,15 +565,6 @@ static void print_ucode(struct ucode_cpu_info *uci)
 }
 #else
 
-/*
- * Flush global tlb. We only do this in x86_64 where paging has been enabled
- * already and PGE should be enabled as well.
- */
-static inline void flush_tlb_early(void)
-{
-       __native_flush_tlb_global_irq_disabled();
-}
-
 static inline void print_ucode(struct ucode_cpu_info *uci)
 {
        struct microcode_intel *mc;
@@ -602,10 +593,6 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
        if (rev != mc->hdr.rev)
                return -1;
 
-#ifdef CONFIG_X86_64
-       /* Flush global tlb. This is precaution. */
-       flush_tlb_early();
-#endif
        uci->cpu_sig.rev = rev;
 
        if (early)
index 0e662c5..0b8cedb 100644 (file)
@@ -50,25 +50,23 @@ static void doublefault_fn(void)
                cpu_relax();
 }
 
-struct tss_struct doublefault_tss __cacheline_aligned = {
-       .x86_tss = {
-               .sp0            = STACK_START,
-               .ss0            = __KERNEL_DS,
-               .ldt            = 0,
-               .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
-
-               .ip             = (unsigned long) doublefault_fn,
-               /* 0x2 bit is always set */
-               .flags          = X86_EFLAGS_SF | 0x2,
-               .sp             = STACK_START,
-               .es             = __USER_DS,
-               .cs             = __KERNEL_CS,
-               .ss             = __KERNEL_DS,
-               .ds             = __USER_DS,
-               .fs             = __KERNEL_PERCPU,
-
-               .__cr3          = __pa_nodebug(swapper_pg_dir),
-       }
+struct x86_hw_tss doublefault_tss __cacheline_aligned = {
+       .sp0            = STACK_START,
+       .ss0            = __KERNEL_DS,
+       .ldt            = 0,
+       .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
+
+       .ip             = (unsigned long) doublefault_fn,
+       /* 0x2 bit is always set */
+       .flags          = X86_EFLAGS_SF | 0x2,
+       .sp             = STACK_START,
+       .es             = __USER_DS,
+       .cs             = __KERNEL_CS,
+       .ss             = __KERNEL_DS,
+       .ds             = __USER_DS,
+       .fs             = __KERNEL_PERCPU,
+
+       .__cr3          = __pa_nodebug(swapper_pg_dir),
 };
 
 /* dummy for do_double_fault() call */
index f13b4c0..36b17e0 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
 
+#include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 
@@ -43,6 +44,24 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
        return true;
 }
 
+bool in_entry_stack(unsigned long *stack, struct stack_info *info)
+{
+       struct entry_stack *ss = cpu_entry_stack(smp_processor_id());
+
+       void *begin = ss;
+       void *end = ss + 1;
+
+       if ((void *)stack < begin || (void *)stack >= end)
+               return false;
+
+       info->type      = STACK_TYPE_ENTRY;
+       info->begin     = begin;
+       info->end       = end;
+       info->next_sp   = NULL;
+
+       return true;
+}
+
 static void printk_stack_address(unsigned long address, int reliable,
                                 char *log_lvl)
 {
@@ -50,6 +69,28 @@ static void printk_stack_address(unsigned long address, int reliable,
        printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
 }
 
+void show_iret_regs(struct pt_regs *regs)
+{
+       printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip);
+       printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
+               regs->sp, regs->flags);
+}
+
+static void show_regs_safe(struct stack_info *info, struct pt_regs *regs)
+{
+       if (on_stack(info, regs, sizeof(*regs)))
+               __show_regs(regs, 0);
+       else if (on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
+                         IRET_FRAME_SIZE)) {
+               /*
+                * When an interrupt or exception occurs in entry code, the
+                * full pt_regs might not have been saved yet.  In that case
+                * just print the iret frame.
+                */
+               show_iret_regs(regs);
+       }
+}
+
 void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                        unsigned long *stack, char *log_lvl)
 {
@@ -71,31 +112,35 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
         * - task stack
         * - interrupt stack
         * - HW exception stacks (double fault, nmi, debug, mce)
+        * - entry stack
         *
-        * x86-32 can have up to three stacks:
+        * x86-32 can have up to four stacks:
         * - task stack
         * - softirq stack
         * - hardirq stack
+        * - entry stack
         */
        for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
                const char *stack_name;
 
-               /*
-                * If we overflowed the task stack into a guard page, jump back
-                * to the bottom of the usable stack.
-                */
-               if (task_stack_page(task) - (void *)stack < PAGE_SIZE)
-                       stack = task_stack_page(task);
-
-               if (get_stack_info(stack, task, &stack_info, &visit_mask))
-                       break;
+               if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
+                       /*
+                        * We weren't on a valid stack.  It's possible that
+                        * we overflowed a valid stack into a guard page.
+                        * See if the next page up is valid so that we can
+                        * generate some kind of backtrace if this happens.
+                        */
+                       stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
+                       if (get_stack_info(stack, task, &stack_info, &visit_mask))
+                               break;
+               }
 
                stack_name = stack_type_name(stack_info.type);
                if (stack_name)
                        printk("%s <%s>\n", log_lvl, stack_name);
 
-               if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
-                       __show_regs(regs, 0);
+               if (regs)
+                       show_regs_safe(&stack_info, regs);
 
                /*
                 * Scan the stack, printing any text addresses we find.  At the
@@ -119,7 +164,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
                        /*
                         * Don't print regs->ip again if it was already printed
-                        * by __show_regs() below.
+                        * by show_regs_safe() below.
                         */
                        if (regs && stack == &regs->ip)
                                goto next;
@@ -155,8 +200,8 @@ next:
 
                        /* if the frame has entry regs, print them */
                        regs = unwind_get_entry_regs(&state);
-                       if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
-                               __show_regs(regs, 0);
+                       if (regs)
+                               show_regs_safe(&stack_info, regs);
                }
 
                if (stack_name)
index daefae8..04170f6 100644 (file)
@@ -26,6 +26,9 @@ const char *stack_type_name(enum stack_type type)
        if (type == STACK_TYPE_SOFTIRQ)
                return "SOFTIRQ";
 
+       if (type == STACK_TYPE_ENTRY)
+               return "ENTRY_TRAMPOLINE";
+
        return NULL;
 }
 
@@ -93,6 +96,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
        if (task != current)
                goto unknown;
 
+       if (in_entry_stack(stack, info))
+               goto recursion_check;
+
        if (in_hardirq_stack(stack, info))
                goto recursion_check;
 
index 88ce2ff..563e28d 100644 (file)
@@ -37,6 +37,15 @@ const char *stack_type_name(enum stack_type type)
        if (type == STACK_TYPE_IRQ)
                return "IRQ";
 
+       if (type == STACK_TYPE_ENTRY) {
+               /*
+                * On 64-bit, we have a generic entry stack that we
+                * use for all the kernel entry points, including
+                * SYSENTER.
+                */
+               return "ENTRY_TRAMPOLINE";
+       }
+
        if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
                return exception_stack_names[type - STACK_TYPE_EXCEPTION];
 
@@ -115,6 +124,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
        if (in_irq_stack(stack, info))
                goto recursion_check;
 
+       if (in_entry_stack(stack, info))
+               goto recursion_check;
+
        goto unknown;
 
 recursion_check:
index 3feb648..2f72330 100644 (file)
@@ -67,7 +67,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
         * because the ->io_bitmap_max value must match the bitmap
         * contents:
         */
-       tss = &per_cpu(cpu_tss, get_cpu());
+       tss = &per_cpu(cpu_tss_rw, get_cpu());
 
        if (turn_on)
                bitmap_clear(t->io_bitmap_ptr, from, num);
index 49cfd9f..68e1867 100644 (file)
@@ -219,18 +219,6 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
        /* high bit used in ret_from_ code  */
        unsigned vector = ~regs->orig_ax;
 
-       /*
-        * NB: Unlike exception entries, IRQ entries do not reliably
-        * handle context tracking in the low-level entry code.  This is
-        * because syscall entries execute briefly with IRQs on before
-        * updating context tracking state, so we can take an IRQ from
-        * kernel mode with CONTEXT_USER.  The low-level entry code only
-        * updates the context if we came from user mode, so we won't
-        * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
-        * code is cleaned up enough that we can cleanly defer enabling
-        * IRQs.
-        */
-
        entering_irq();
 
        /* entering_irq() tells RCU that we're not quiescent.  Check it. */
index 020efbf..d86e344 100644 (file)
@@ -57,10 +57,10 @@ static inline void stack_overflow_check(struct pt_regs *regs)
        if (regs->sp >= estack_top && regs->sp <= estack_bottom)
                return;
 
-       WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n",
+       WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pF)\n",
                current->comm, curbase, regs->sp,
                irq_stack_top, irq_stack_bottom,
-               estack_top, estack_bottom);
+               estack_top, estack_bottom, (void *)regs->ip);
 
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
index 1c1eae9..a6b5d62 100644 (file)
@@ -5,6 +5,11 @@
  * Copyright (C) 2002 Andi Kleen
  *
  * This handles calls from both 32bit and 64bit mode.
+ *
+ * Lock order:
+ *     context.ldt_usr_sem
+ *       mmap_sem
+ *         context.lock
  */
 
 #include <linux/errno.h>
@@ -42,7 +47,7 @@ static void refresh_ldt_segments(void)
 #endif
 }
 
-/* context.lock is held for us, so we don't need any locking. */
+/* context.lock is held by the task which issued the smp function call */
 static void flush_ldt(void *__mm)
 {
        struct mm_struct *mm = __mm;
@@ -99,15 +104,17 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
        paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
 }
 
-/* context.lock is held */
-static void install_ldt(struct mm_struct *current_mm,
-                       struct ldt_struct *ldt)
+static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
 {
+       mutex_lock(&mm->context.lock);
+
        /* Synchronizes with READ_ONCE in load_mm_ldt. */
-       smp_store_release(&current_mm->context.ldt, ldt);
+       smp_store_release(&mm->context.ldt, ldt);
 
-       /* Activate the LDT for all CPUs using current_mm. */
-       on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+       /* Activate the LDT for all CPUs using current's mm. */
+       on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
+
+       mutex_unlock(&mm->context.lock);
 }
 
 static void free_ldt_struct(struct ldt_struct *ldt)
@@ -124,27 +131,20 @@ static void free_ldt_struct(struct ldt_struct *ldt)
 }
 
 /*
- * we do not have to muck with descriptors here, that is
- * done in switch_mm() as needed.
+ * Called on fork from arch_dup_mmap(). Just copy the current LDT state,
+ * the new task is not running, so nothing can be installed.
  */
-int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
+int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
 {
        struct ldt_struct *new_ldt;
-       struct mm_struct *old_mm;
        int retval = 0;
 
-       mutex_init(&mm->context.lock);
-       old_mm = current->mm;
-       if (!old_mm) {
-               mm->context.ldt = NULL;
+       if (!old_mm)
                return 0;
-       }
 
        mutex_lock(&old_mm->context.lock);
-       if (!old_mm->context.ldt) {
-               mm->context.ldt = NULL;
+       if (!old_mm->context.ldt)
                goto out_unlock;
-       }
 
        new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
        if (!new_ldt) {
@@ -180,7 +180,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
        unsigned long entries_size;
        int retval;
 
-       mutex_lock(&mm->context.lock);
+       down_read(&mm->context.ldt_usr_sem);
 
        if (!mm->context.ldt) {
                retval = 0;
@@ -209,7 +209,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
        retval = bytecount;
 
 out_unlock:
-       mutex_unlock(&mm->context.lock);
+       up_read(&mm->context.ldt_usr_sem);
        return retval;
 }
 
@@ -269,7 +269,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
                        ldt.avl = 0;
        }
 
-       mutex_lock(&mm->context.lock);
+       if (down_write_killable(&mm->context.ldt_usr_sem))
+               return -EINTR;
 
        old_ldt       = mm->context.ldt;
        old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
@@ -291,7 +292,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
        error = 0;
 
 out_unlock:
-       mutex_unlock(&mm->context.lock);
+       up_write(&mm->context.ldt_usr_sem);
 out:
        return error;
 }
index ac0be82..9edadab 100644 (file)
@@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
@@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_mmu_ops, read_cr2);
                PATCH_SITE(pv_mmu_ops, read_cr3);
                PATCH_SITE(pv_mmu_ops, write_cr3);
-               PATCH_SITE(pv_mmu_ops, flush_tlb_single);
                PATCH_SITE(pv_cpu_ops, wbinvd);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
                case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
index bb988a2..aed9d94 100644 (file)
@@ -47,7 +47,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
        .x86_tss = {
                /*
                 * .sp0 is only used when entering ring 0 from a lower
@@ -56,6 +56,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
                 * Poison it.
                 */
                .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
+
+#ifdef CONFIG_X86_64
+               /*
+                * .sp1 is cpu_current_top_of_stack.  The init task never
+                * runs user code, but cpu_current_top_of_stack should still
+                * be well defined before the first context switch.
+                */
+               .sp1 = TOP_OF_INIT_STACK,
+#endif
+
 #ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
                .ss1 = __KERNEL_CS,
@@ -71,11 +81,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
          */
        .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
-#ifdef CONFIG_X86_32
-       .SYSENTER_stack_canary  = STACK_END_MAGIC,
-#endif
 };
-EXPORT_PER_CPU_SYMBOL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
 
 DEFINE_PER_CPU(bool, __tss_limit_invalid);
 EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
@@ -104,7 +111,7 @@ void exit_thread(struct task_struct *tsk)
        struct fpu *fpu = &t->fpu;
 
        if (bp) {
-               struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
+               struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
 
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
index 45bf0c5..5224c60 100644 (file)
@@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
index eeeb34f..c754662 100644 (file)
@@ -69,9 +69,8 @@ void __show_regs(struct pt_regs *regs, int all)
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;
 
-       printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs, (void *)regs->ip);
-       printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
-               regs->sp, regs->flags);
+       show_iret_regs(regs);
+
        if (regs->orig_ax != -1)
                pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
        else
@@ -88,6 +87,9 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);
 
+       if (!all)
+               return;
+
        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
@@ -98,9 +100,6 @@ void __show_regs(struct pt_regs *regs, int all)
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
 
-       if (!all)
-               return;
-
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = __read_cr3();
@@ -400,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
                     this_cpu_read(irq_count) != -1);
@@ -462,6 +461,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Switch the PDA and FPU contexts.
         */
        this_cpu_write(current_task, next_p);
+       this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
 
        /* Reload sp0. */
        update_sp0(next_p);
index 35cb209..c5970ef 100644 (file)
@@ -932,12 +932,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
        initial_code = (unsigned long)start_secondary;
        initial_stack  = idle->thread.sp;
 
-       /*
-        * Enable the espfix hack for this CPU
-       */
-#ifdef CONFIG_X86_ESPFIX64
+       /* Enable the espfix hack for this CPU */
        init_espfix_ap(cpu);
-#endif
 
        /* So we see what's up */
        announce_cpu(cpu, apicid);
index 989514c..f69dbd4 100644 (file)
@@ -51,6 +51,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/fpu/internal.h>
+#include <asm/cpu_entry_area.h>
 #include <asm/mce.h>
 #include <asm/fixmap.h>
 #include <asm/mach_traps.h>
@@ -348,9 +349,15 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 
        /*
         * If IRET takes a non-IST fault on the espfix64 stack, then we
-        * end up promoting it to a doublefault.  In that case, modify
-        * the stack to make it look like we just entered the #GP
-        * handler from user space, similar to bad_iret.
+        * end up promoting it to a doublefault.  In that case, take
+        * advantage of the fact that we're not using the normal (TSS.sp0)
+        * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
+        * and then modify our own IRET frame so that, when we return,
+        * we land directly at the #GP(0) vector with the stack already
+        * set up according to its expectations.
+        *
+        * The net result is that our #GP handler will think that we
+        * entered from usermode with the bad user context.
         *
         * No need for ist_enter here because we don't use RCU.
         */
@@ -358,13 +365,26 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
                regs->cs == __KERNEL_CS &&
                regs->ip == (unsigned long)native_irq_return_iret)
        {
-               struct pt_regs *normal_regs = task_pt_regs(current);
+               struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
-               /* Fake a #GP(0) from userspace. */
-               memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
-               normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
+               /*
+                * regs->sp points to the failing IRET frame on the
+                * ESPFIX64 stack.  Copy it to the entry stack.  This fills
+                * in gpregs->ss through gpregs->ip.
+                *
+                */
+               memmove(&gpregs->ip, (void *)regs->sp, 5*8);
+               gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */
+
+               /*
+                * Adjust our frame so that we return straight to the #GP
+                * vector with the expected RSP value.  This is safe because
+                * we won't enable interrupts or schedule before we invoke
+                * general_protection, so nothing will clobber the stack
+                * frame we just set up.
+                */
                regs->ip = (unsigned long)general_protection;
-               regs->sp = (unsigned long)&normal_regs->orig_ax;
+               regs->sp = (unsigned long)&gpregs->orig_ax;
 
                return;
        }
@@ -389,7 +409,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
         *
         *   Processors update CR2 whenever a page fault is detected. If a
         *   second page fault occurs while an earlier page fault is being
-        *   delivered, the faulting linear address of the second fault will
+        *   delivered, the faulting linear address of the second fault will
         *   overwrite the contents of CR2 (replacing the previous
         *   address). These updates to CR2 occur even if the page fault
         *   results in a double fault or occurs during the delivery of a
@@ -605,14 +625,15 @@ NOKPROBE_SYMBOL(do_int3);
 
 #ifdef CONFIG_X86_64
 /*
- * Help handler running on IST stack to switch off the IST stack if the
- * interrupted code was in user mode. The actual stack switch is done in
- * entry_64.S
+ * Help handler running on a per-cpu (IST or entry trampoline) stack
+ * to switch to the normal thread stack if the interrupted code was in
+ * user mode. The actual stack switch is done in entry_64.S
  */
 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
-       struct pt_regs *regs = task_pt_regs(current);
-       *regs = *eregs;
+       struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
+       if (regs != eregs)
+               *regs = *eregs;
        return regs;
 }
 NOKPROBE_SYMBOL(sync_regs);
@@ -628,13 +649,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
        /*
         * This is called from entry_64.S early in handling a fault
         * caused by a bad iret to user mode.  To handle the fault
-        * correctly, we want move our stack frame to task_pt_regs
-        * and we want to pretend that the exception came from the
-        * iret target.
+        * correctly, we want to move our stack frame to where it would
+        * be had we entered directly on the entry stack (rather than
+        * just below the IRET frame) and we want to pretend that the
+        * exception came from the IRET target.
         */
        struct bad_iret_stack *new_stack =
-               container_of(task_pt_regs(current),
-                            struct bad_iret_stack, regs);
+               (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
        /* Copy the IRET target to the new stack. */
        memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
@@ -795,14 +816,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        debug_stack_usage_dec();
 
 exit:
-#if defined(CONFIG_X86_32)
-       /*
-        * This is the most likely code path that involves non-trivial use
-        * of the SYSENTER stack.  Check that we haven't overrun it.
-        */
-       WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
-            "Overran or corrupted SYSENTER stack\n");
-#endif
        ist_exit(regs);
 }
 NOKPROBE_SYMBOL(do_debug);
@@ -929,6 +942,9 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 
 void __init trap_init(void)
 {
+       /* Init cpu_entry_area before IST entries are set up */
+       setup_cpu_entry_areas();
+
        idt_setup_traps();
 
        /*
@@ -936,8 +952,9 @@ void __init trap_init(void)
         * "sidt" instruction will not leak the location of the kernel, and
         * to defend the IDT against arbitrary memory write vulnerabilities.
         * It will be reloaded in cpu_init() */
-       __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
-       idt_descr.address = fix_to_virt(FIX_RO_IDT);
+       cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
+                   PAGE_KERNEL_RO);
+       idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
 
        /*
         * Should be a barrier for any external CPU state:
index a3f973b..be86a86 100644 (file)
@@ -253,22 +253,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
        return NULL;
 }
 
-static bool stack_access_ok(struct unwind_state *state, unsigned long addr,
+static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
                            size_t len)
 {
        struct stack_info *info = &state->stack_info;
+       void *addr = (void *)_addr;
 
-       /*
-        * If the address isn't on the current stack, switch to the next one.
-        *
-        * We may have to traverse multiple stacks to deal with the possibility
-        * that info->next_sp could point to an empty stack and the address
-        * could be on a subsequent stack.
-        */
-       while (!on_stack(info, (void *)addr, len))
-               if (get_stack_info(info->next_sp, state->task, info,
-                                  &state->stack_mask))
-                       return false;
+       if (!on_stack(info, addr, len) &&
+           (get_stack_info(addr, state->task, info, &state->stack_mask)))
+               return false;
 
        return true;
 }
@@ -283,42 +276,32 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
        return true;
 }
 
-#define REGS_SIZE (sizeof(struct pt_regs))
-#define SP_OFFSET (offsetof(struct pt_regs, sp))
-#define IRET_REGS_SIZE (REGS_SIZE - offsetof(struct pt_regs, ip))
-#define IRET_SP_OFFSET (SP_OFFSET - offsetof(struct pt_regs, ip))
-
 static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
-                            unsigned long *ip, unsigned long *sp, bool full)
+                            unsigned long *ip, unsigned long *sp)
 {
-       size_t regs_size = full ? REGS_SIZE : IRET_REGS_SIZE;
-       size_t sp_offset = full ? SP_OFFSET : IRET_SP_OFFSET;
-       struct pt_regs *regs = (struct pt_regs *)(addr + regs_size - REGS_SIZE);
-
-       if (IS_ENABLED(CONFIG_X86_64)) {
-               if (!stack_access_ok(state, addr, regs_size))
-                       return false;
+       struct pt_regs *regs = (struct pt_regs *)addr;
 
-               *ip = regs->ip;
-               *sp = regs->sp;
+       /* x86-32 support will be more complicated due to the &regs->sp hack */
+       BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));
 
-               return true;
-       }
-
-       if (!stack_access_ok(state, addr, sp_offset))
+       if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
                return false;
 
        *ip = regs->ip;
+       *sp = regs->sp;
+       return true;
+}
 
-       if (user_mode(regs)) {
-               if (!stack_access_ok(state, addr + sp_offset,
-                                    REGS_SIZE - SP_OFFSET))
-                       return false;
+static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
+                                 unsigned long *ip, unsigned long *sp)
+{
+       struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;
 
-               *sp = regs->sp;
-       } else
-               *sp = (unsigned long)&regs->sp;
+       if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
+               return false;
 
+       *ip = regs->ip;
+       *sp = regs->sp;
        return true;
 }
 
@@ -327,7 +310,6 @@ bool unwind_next_frame(struct unwind_state *state)
        unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
        enum stack_type prev_type = state->stack_info.type;
        struct orc_entry *orc;
-       struct pt_regs *ptregs;
        bool indirect = false;
 
        if (unwind_done(state))
@@ -435,7 +417,7 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        case ORC_TYPE_REGS:
-               if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
+               if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn("can't dereference registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
                        goto done;
@@ -447,20 +429,14 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        case ORC_TYPE_REGS_IRET:
-               if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
+               if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn("can't dereference iret registers at %p for ip %pB\n",
                                 (void *)sp, (void *)orig_ip);
                        goto done;
                }
 
-               ptregs = container_of((void *)sp, struct pt_regs, ip);
-               if ((unsigned long)ptregs >= prev_sp &&
-                   on_stack(&state->stack_info, ptregs, REGS_SIZE)) {
-                       state->regs = ptregs;
-                       state->full_regs = false;
-               } else
-                       state->regs = NULL;
-
+               state->regs = (void *)sp - IRET_FRAME_OFFSET;
+               state->full_regs = false;
                state->signal = true;
                break;
 
@@ -553,8 +529,18 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        }
 
        if (get_stack_info((unsigned long *)state->sp, state->task,
-                          &state->stack_info, &state->stack_mask))
-               return;
+                          &state->stack_info, &state->stack_mask)) {
+               /*
+                * We weren't on a valid stack.  It's possible that
+                * we overflowed a valid stack into a guard page.
+                * See if the next page up is valid so that we can
+                * generate some kind of backtrace if this happens.
+                */
+               void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
+               if (get_stack_info(next_page, state->task, &state->stack_info,
+                                  &state->stack_mask))
+                       return;
+       }
 
        /*
         * The caller can provide the address of the first frame directly
index a4009fb..d2a8b5a 100644 (file)
@@ -107,6 +107,15 @@ SECTIONS
                SOFTIRQENTRY_TEXT
                *(.fixup)
                *(.gnu.warning)
+
+#ifdef CONFIG_X86_64
+               . = ALIGN(PAGE_SIZE);
+               _entry_trampoline = .;
+               *(.entry_trampoline)
+               . = ALIGN(PAGE_SIZE);
+               ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
+#endif
+
                /* End of text section */
                _etext = .;
        } :text = 0x9090
index abe74f7..b514b2b 100644 (file)
@@ -2390,9 +2390,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 }
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
-                                    u64 cr0, u64 cr4)
+                                   u64 cr0, u64 cr3, u64 cr4)
 {
        int bad;
+       u64 pcid;
+
+       /* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
+       pcid = 0;
+       if (cr4 & X86_CR4_PCIDE) {
+               pcid = cr3 & 0xfff;
+               cr3 &= ~0xfff;
+       }
+
+       bad = ctxt->ops->set_cr(ctxt, 3, cr3);
+       if (bad)
+               return X86EMUL_UNHANDLEABLE;
 
        /*
         * First enable PAE, long mode needs it before CR0.PG = 1 is set.
@@ -2411,6 +2423,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                bad = ctxt->ops->set_cr(ctxt, 4, cr4);
                if (bad)
                        return X86EMUL_UNHANDLEABLE;
+               if (pcid) {
+                       bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
+                       if (bad)
+                               return X86EMUL_UNHANDLEABLE;
+               }
+
        }
 
        return X86EMUL_CONTINUE;
@@ -2421,11 +2439,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
        struct desc_struct desc;
        struct desc_ptr dt;
        u16 selector;
-       u32 val, cr0, cr4;
+       u32 val, cr0, cr3, cr4;
        int i;
 
        cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-       ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
+       cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
        ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
        ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
 
@@ -2467,14 +2485,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
        ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
 
-       return rsm_enter_protected_mode(ctxt, cr0, cr4);
+       return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
        struct desc_struct desc;
        struct desc_ptr dt;
-       u64 val, cr0, cr4;
+       u64 val, cr0, cr3, cr4;
        u32 base3;
        u16 selector;
        int i, r;
@@ -2491,7 +2509,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
        cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-       ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
+       cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
        cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
        ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
        val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
@@ -2519,7 +2537,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
        ctxt->ops->set_gdt(ctxt, &dt);
 
-       r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+       r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
        if (r != X86EMUL_CONTINUE)
                return r;
 
index e5e66e5..c4deb1f 100644 (file)
@@ -3395,7 +3395,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if(make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, 0, 0,
                                vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
@@ -3410,7 +3410,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                        spin_lock(&vcpu->kvm->mmu_lock);
                        if (make_mmu_pages_available(vcpu) < 0) {
                                spin_unlock(&vcpu->kvm->mmu_lock);
-                               return 1;
+                               return -ENOSPC;
                        }
                        sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
                                        i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
@@ -3450,7 +3450,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if (make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
@@ -3487,7 +3487,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                if (make_mmu_pages_available(vcpu) < 0) {
                        spin_unlock(&vcpu->kvm->mmu_lock);
-                       return 1;
+                       return -ENOSPC;
                }
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
                                      0, ACC_ALL);
index 8eba631..023afa0 100644 (file)
@@ -2302,7 +2302,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 * processors.  See 22.2.4.
                 */
                vmcs_writel(HOST_TR_BASE,
-                           (unsigned long)this_cpu_ptr(&cpu_tss));
+                           (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
                vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
 
                /*
index faf843c..1cec2c6 100644 (file)
@@ -4384,7 +4384,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
                                         addr, n, v))
                    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
                        break;
-               trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
+               trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
                handled += n;
                addr += n;
                len -= n;
@@ -4643,7 +4643,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 {
        if (vcpu->mmio_read_completed) {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
-                              vcpu->mmio_fragments[0].gpa, *(u64 *)val);
+                              vcpu->mmio_fragments[0].gpa, val);
                vcpu->mmio_read_completed = 0;
                return 1;
        }
@@ -4665,14 +4665,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
 {
-       trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
+       trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
        return vcpu_mmio_write(vcpu, gpa, bytes, val);
 }
 
 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
                          void *val, int bytes)
 {
-       trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
+       trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
        return X86EMUL_IO_NEEDED;
 }
 
@@ -7264,13 +7264,12 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-       struct fpu *fpu = &current->thread.fpu;
        int r;
 
-       fpu__initialize(fpu);
-
        kvm_sigset_activate(vcpu);
 
+       kvm_load_guest_fpu(vcpu);
+
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                if (kvm_run->immediate_exit) {
                        r = -EINTR;
@@ -7296,14 +7295,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                }
        }
 
-       kvm_load_guest_fpu(vcpu);
-
        if (unlikely(vcpu->arch.complete_userspace_io)) {
                int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
                vcpu->arch.complete_userspace_io = NULL;
                r = cui(vcpu);
                if (r <= 0)
-                       goto out_fpu;
+                       goto out;
        } else
                WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
 
@@ -7312,9 +7309,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        else
                r = vcpu_run(vcpu);
 
-out_fpu:
-       kvm_put_guest_fpu(vcpu);
 out:
+       kvm_put_guest_fpu(vcpu);
        post_kvm_run_save(vcpu);
        kvm_sigset_deactivate(vcpu);
 
@@ -7384,7 +7380,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 #endif
 
        kvm_rip_write(vcpu, regs->rip);
-       kvm_set_rflags(vcpu, regs->rflags);
+       kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
 
        vcpu->arch.exception.pending = false;
 
@@ -7498,6 +7494,29 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
 
+int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
+               /*
+                * When EFER.LME and CR0.PG are set, the processor is in
+                * 64-bit mode (though maybe in a 32-bit code segment).
+                * CR4.PAE and EFER.LMA must be set.
+                */
+               if (!(sregs->cr4 & X86_CR4_PAE_BIT)
+                   || !(sregs->efer & EFER_LMA))
+                       return -EINVAL;
+       } else {
+               /*
+                * Not in 64-bit mode: EFER.LMA is clear and the code
+                * segment cannot be 64-bit.
+                */
+               if (sregs->efer & EFER_LMA || sregs->cs.l)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
@@ -7510,6 +7529,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                        (sregs->cr4 & X86_CR4_OSXSAVE))
                return -EINVAL;
 
+       if (kvm_valid_sregs(vcpu, sregs))
+               return -EINVAL;
+
        apic_base_msr.data = sregs->apic_base;
        apic_base_msr.host_initiated = true;
        if (kvm_set_apic_base(vcpu, &apic_base_msr))
index 553f8fd..4846eff 100644 (file)
@@ -107,10 +107,10 @@ static void delay_mwaitx(unsigned long __loops)
                delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
 
                /*
-                * Use cpu_tss as a cacheline-aligned, seldomly
+                * Use cpu_tss_rw as a cacheline-aligned, seldomly
                 * accessed per-cpu variable as the monitor target.
                 */
-               __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
+               __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
                /*
                 * AMD, like Intel, supports the EAX hint and EAX=0xf
index 8e13b8c..52195ee 100644 (file)
@@ -10,7 +10,7 @@ CFLAGS_REMOVE_mem_encrypt.o   = -pg
 endif
 
 obj-y  :=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-           pat.o pgtable.o physaddr.o setup_nx.o tlb.o
+           pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o
 
 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
new file mode 100644 (file)
index 0000000..fe814fd
--- /dev/null
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+
+#include <asm/cpu_entry_area.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+#include <asm/desc.h>
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+#endif
+
+struct cpu_entry_area *get_cpu_entry_area(int cpu)
+{
+       unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+       BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
+
+       return (struct cpu_entry_area *) va;
+}
+EXPORT_SYMBOL(get_cpu_entry_area);
+
+void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
+{
+       unsigned long va = (unsigned long) cea_vaddr;
+
+       set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
+}
+
+static void __init
+cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
+{
+       for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
+               cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
+}
+
+/* Setup the fixmap mappings only once per-processor */
+static void __init setup_cpu_entry_area(int cpu)
+{
+#ifdef CONFIG_X86_64
+       extern char _entry_trampoline[];
+
+       /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
+       pgprot_t gdt_prot = PAGE_KERNEL_RO;
+       pgprot_t tss_prot = PAGE_KERNEL_RO;
+#else
+       /*
+        * On native 32-bit systems, the GDT cannot be read-only because
+        * our double fault handler uses a task gate, and entering through
+        * a task gate needs to change an available TSS to busy.  If the
+        * GDT is read-only, that will triple fault.  The TSS cannot be
+        * read-only because the CPU writes to it on task switches.
+        *
+        * On Xen PV, the GDT must be read-only because the hypervisor
+        * requires it.
+        */
+       pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
+               PAGE_KERNEL_RO : PAGE_KERNEL;
+       pgprot_t tss_prot = PAGE_KERNEL;
+#endif
+
+       cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
+                   gdt_prot);
+
+       cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
+                            per_cpu_ptr(&entry_stack_storage, cpu), 1,
+                            PAGE_KERNEL);
+
+       /*
+        * The Intel SDM says (Volume 3, 7.2.1):
+        *
+        *  Avoid placing a page boundary in the part of the TSS that the
+        *  processor reads during a task switch (the first 104 bytes). The
+        *  processor may not correctly perform address translations if a
+        *  boundary occurs in this area. During a task switch, the processor
+        *  reads and writes into the first 104 bytes of each TSS (using
+        *  contiguous physical addresses beginning with the physical address
+        *  of the first byte of the TSS). So, after TSS access begins, if
+        *  part of the 104 bytes is not physically contiguous, the processor
+        *  will access incorrect information without generating a page-fault
+        *  exception.
+        *
+        * There are also a lot of errata involving the TSS spanning a page
+        * boundary.  Assert that we're not doing that.
+        */
+       BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
+                     offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
+       BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
+       cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
+                            &per_cpu(cpu_tss_rw, cpu),
+                            sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
+
+#ifdef CONFIG_X86_32
+       per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
+#endif
+
+#ifdef CONFIG_X86_64
+       BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+       BUILD_BUG_ON(sizeof(exception_stacks) !=
+                    sizeof(((struct cpu_entry_area *)0)->exception_stacks));
+       cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
+                            &per_cpu(exception_stacks, cpu),
+                            sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
+
+       cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
+                    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
+#endif
+}
+
+static __init void setup_cpu_entry_area_ptes(void)
+{
+#ifdef CONFIG_X86_32
+       unsigned long start, end;
+
+       BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
+       BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
+
+       start = CPU_ENTRY_AREA_BASE;
+       end = start + CPU_ENTRY_AREA_MAP_SIZE;
+
+       /* Careful here: start + PMD_SIZE might wrap around */
+       for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
+               populate_extra_pte(start);
+#endif
+}
+
+void __init setup_cpu_entry_areas(void)
+{
+       unsigned int cpu;
+
+       setup_cpu_entry_area_ptes();
+
+       for_each_possible_cpu(cpu)
+               setup_cpu_entry_area(cpu);
+}
index 5e3ac6f..43dedbf 100644 (file)
@@ -44,10 +44,12 @@ struct addr_marker {
        unsigned long max_lines;
 };
 
-/* indices for address_markers; keep sync'd w/ address_markers below */
+/* Address space markers hints */
+
+#ifdef CONFIG_X86_64
+
 enum address_markers_idx {
        USER_SPACE_NR = 0,
-#ifdef CONFIG_X86_64
        KERNEL_SPACE_NR,
        LOW_KERNEL_NR,
        VMALLOC_START_NR,
@@ -56,56 +58,74 @@ enum address_markers_idx {
        KASAN_SHADOW_START_NR,
        KASAN_SHADOW_END_NR,
 #endif
-# ifdef CONFIG_X86_ESPFIX64
+       CPU_ENTRY_AREA_NR,
+#ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
-# endif
+#endif
+#ifdef CONFIG_EFI
+       EFI_END_NR,
+#endif
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
-#else
+       FIXADDR_START_NR,
+       END_OF_SPACE_NR,
+};
+
+static struct addr_marker address_markers[] = {
+       [USER_SPACE_NR]         = { 0,                  "User Space" },
+       [KERNEL_SPACE_NR]       = { (1UL << 63),        "Kernel Space" },
+       [LOW_KERNEL_NR]         = { 0UL,                "Low Kernel Mapping" },
+       [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
+       [VMEMMAP_START_NR]      = { 0UL,                "Vmemmap" },
+#ifdef CONFIG_KASAN
+       [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" },
+       [KASAN_SHADOW_END_NR]   = { KASAN_SHADOW_END,   "KASAN shadow end" },
+#endif
+       [CPU_ENTRY_AREA_NR]     = { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
+#ifdef CONFIG_X86_ESPFIX64
+       [ESPFIX_START_NR]       = { ESPFIX_BASE_ADDR,   "ESPfix Area", 16 },
+#endif
+#ifdef CONFIG_EFI
+       [EFI_END_NR]            = { EFI_VA_END,         "EFI Runtime Services" },
+#endif
+       [HIGH_KERNEL_NR]        = { __START_KERNEL_map, "High Kernel Mapping" },
+       [MODULES_VADDR_NR]      = { MODULES_VADDR,      "Modules" },
+       [MODULES_END_NR]        = { MODULES_END,        "End Modules" },
+       [FIXADDR_START_NR]      = { FIXADDR_START,      "Fixmap Area" },
+       [END_OF_SPACE_NR]       = { -1,                 NULL }
+};
+
+#else /* CONFIG_X86_64 */
+
+enum address_markers_idx {
+       USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        VMALLOC_START_NR,
        VMALLOC_END_NR,
-# ifdef CONFIG_HIGHMEM
+#ifdef CONFIG_HIGHMEM
        PKMAP_BASE_NR,
-# endif
-       FIXADDR_START_NR,
 #endif
+       CPU_ENTRY_AREA_NR,
+       FIXADDR_START_NR,
+       END_OF_SPACE_NR,
 };
 
-/* Address space markers hints */
 static struct addr_marker address_markers[] = {
-       { 0, "User Space" },
-#ifdef CONFIG_X86_64
-       { 0x8000000000000000UL, "Kernel Space" },
-       { 0/* PAGE_OFFSET */,   "Low Kernel Mapping" },
-       { 0/* VMALLOC_START */, "vmalloc() Area" },
-       { 0/* VMEMMAP_START */, "Vmemmap" },
-#ifdef CONFIG_KASAN
-       { KASAN_SHADOW_START,   "KASAN shadow" },
-       { KASAN_SHADOW_END,     "KASAN shadow end" },
+       [USER_SPACE_NR]         = { 0,                  "User Space" },
+       [KERNEL_SPACE_NR]       = { PAGE_OFFSET,        "Kernel Mapping" },
+       [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
+       [VMALLOC_END_NR]        = { 0UL,                "vmalloc() End" },
+#ifdef CONFIG_HIGHMEM
+       [PKMAP_BASE_NR]         = { 0UL,                "Persistent kmap() Area" },
 #endif
-# ifdef CONFIG_X86_ESPFIX64
-       { ESPFIX_BASE_ADDR,     "ESPfix Area", 16 },
-# endif
-# ifdef CONFIG_EFI
-       { EFI_VA_END,           "EFI Runtime Services" },
-# endif
-       { __START_KERNEL_map,   "High Kernel Mapping" },
-       { MODULES_VADDR,        "Modules" },
-       { MODULES_END,          "End Modules" },
-#else
-       { PAGE_OFFSET,          "Kernel Mapping" },
-       { 0/* VMALLOC_START */, "vmalloc() Area" },
-       { 0/*VMALLOC_END*/,     "vmalloc() End" },
-# ifdef CONFIG_HIGHMEM
-       { 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
-# endif
-       { 0/*FIXADDR_START*/,   "Fixmap Area" },
-#endif
-       { -1, NULL }            /* End of list */
+       [CPU_ENTRY_AREA_NR]     = { 0UL,                "CPU entry area" },
+       [FIXADDR_START_NR]      = { 0UL,                "Fixmap area" },
+       [END_OF_SPACE_NR]       = { -1,                 NULL }
 };
 
+#endif /* !CONFIG_X86_64 */
+
 /* Multipliers for offsets within the PTEs */
 #define PTE_LEVEL_MULT (PAGE_SIZE)
 #define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
@@ -140,7 +160,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
        static const char * const level_name[] =
                { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };
 
-       if (!pgprot_val(prot)) {
+       if (!(pr & _PAGE_PRESENT)) {
                /* Not present */
                pt_dump_cont_printf(m, dmsg, "                              ");
        } else {
@@ -525,8 +545,8 @@ static int __init pt_dump_init(void)
        address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
 # endif
        address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
+       address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
 #endif
-
        return 0;
 }
 __initcall(pt_dump_init);
index febf698..06fe3d5 100644 (file)
@@ -860,7 +860,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
        if (!printk_ratelimit())
                return;
 
-       printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+       printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
                task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                tsk->comm, task_pid_nr(tsk), address,
                (void *)regs->ip, (void *)regs->sp, error_code);
index 8a64a6f..135c9a7 100644 (file)
@@ -50,6 +50,7 @@
 #include <asm/setup.h>
 #include <asm/set_memory.h>
 #include <asm/page_types.h>
+#include <asm/cpu_entry_area.h>
 #include <asm/init.h>
 
 #include "mm_internal.h"
@@ -766,6 +767,7 @@ void __init mem_init(void)
        mem_init_print_info(NULL);
        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+               "  cpu_entry : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #endif
@@ -777,6 +779,10 @@ void __init mem_init(void)
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,
 
+               CPU_ENTRY_AREA_BASE,
+               CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
+               CPU_ENTRY_AREA_MAP_SIZE >> 10,
+
 #ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
index 99dfed6..47388f0 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
+#include <asm/cpu_entry_area.h>
 
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
@@ -277,6 +278,7 @@ void __init kasan_early_init(void)
 void __init kasan_init(void)
 {
        int i;
+       void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;
 
 #ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
@@ -321,16 +323,33 @@ void __init kasan_init(void)
                map_range(&pfn_mapped[i]);
        }
 
+       shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
+       shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
+       shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
+                                               PAGE_SIZE);
+
+       shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
+                                       CPU_ENTRY_AREA_MAP_SIZE);
+       shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
+       shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
+                                       PAGE_SIZE);
+
        kasan_populate_zero_shadow(
                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
-               kasan_mem_to_shadow((void *)__START_KERNEL_map));
+               shadow_cpu_entry_begin);
+
+       kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
+                             (unsigned long)shadow_cpu_entry_end, 0);
+
+       kasan_populate_zero_shadow(shadow_cpu_entry_end,
+                               kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
        kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
                              (unsigned long)kasan_mem_to_shadow(_end),
                              early_pfn_to_nid(__pa(_stext)));
 
        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-                       (void *)KASAN_SHADOW_END);
+                               (void *)KASAN_SHADOW_END);
 
        load_cr3(init_top_pgt);
        __flush_tlb_all();
index 6b9bf02..c3c5274 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
 
+#include <asm/cpu_entry_area.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
index 3118392..0a1be3a 100644 (file)
@@ -128,7 +128,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         * isn't free.
         */
 #ifdef CONFIG_DEBUG_VM
-       if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
+       if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
                /*
                 * If we were to BUG here, we'd be very likely to kill
                 * the system so hard that we don't see the call trace.
@@ -195,7 +195,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                if (need_flush) {
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-                       write_cr3(build_cr3(next, new_asid));
+                       write_cr3(build_cr3(next->pgd, new_asid));
 
                        /*
                         * NB: This gets called via leave_mm() in the idle path
@@ -208,7 +208,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                } else {
                        /* The new ASID is already up to date. */
-                       write_cr3(build_cr3_noflush(next, new_asid));
+                       write_cr3(build_cr3_noflush(next->pgd, new_asid));
 
                        /* See above wrt _rcuidle. */
                        trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
@@ -288,7 +288,7 @@ void initialize_tlbstate_and_flush(void)
                !(cr4_read_shadow() & X86_CR4_PCIDE));
 
        /* Force ASID 0 and force a TLB flush. */
-       write_cr3(build_cr3(mm, 0));
+       write_cr3(build_cr3(mm->pgd, 0));
 
        /* Reinitialize tlbstate. */
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
@@ -551,7 +551,7 @@ static void do_kernel_range_flush(void *info)
 
        /* flush range by one by one 'invlpg' */
        for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
-               __flush_tlb_single(addr);
+               __flush_tlb_one(addr);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
index f44c0bc..8538a67 100644 (file)
@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
                local_flush_tlb();
                stat->d_alltlb++;
        } else {
-               __flush_tlb_one(msg->address);
+               __flush_tlb_single(msg->address);
                stat->d_onetlb++;
        }
        stat->d_requestee++;
index 36a28ed..a7d9669 100644 (file)
@@ -152,17 +152,19 @@ static void do_fpu_end(void)
 static void fix_processor_context(void)
 {
        int cpu = smp_processor_id();
-       struct tss_struct *t = &per_cpu(cpu_tss, cpu);
 #ifdef CONFIG_X86_64
        struct desc_struct *desc = get_cpu_gdt_rw(cpu);
        tss_desc tss;
 #endif
-       set_tss_desc(cpu, t);   /*
-                                * This just modifies memory; should not be
-                                * necessary. But... This is necessary, because
-                                * 386 hardware has concept of busy TSS or some
-                                * similar stupidity.
-                                */
+
+       /*
+        * We need to reload TR, which requires that we change the
+        * GDT entry to indicate "available" first.
+        *
+        * XXX: This could probably all be replaced by a call to
+        * force_reload_TR().
+        */
+       set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 
 #ifdef CONFIG_X86_64
        memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
index d669e9d..c9081c6 100644 (file)
@@ -1,8 +1,12 @@
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+#include <linux/bootmem.h>
+#endif
 #include <linux/cpu.h>
 #include <linux/kexec.h>
 
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/interface/memory.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -331,3 +335,80 @@ void xen_arch_unregister_cpu(int num)
 }
 EXPORT_SYMBOL(xen_arch_unregister_cpu);
 #endif
+
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+void __init arch_xen_balloon_init(struct resource *hostmem_resource)
+{
+       struct xen_memory_map memmap;
+       int rc;
+       unsigned int i, last_guest_ram;
+       phys_addr_t max_addr = PFN_PHYS(max_pfn);
+       struct e820_table *xen_e820_table;
+       const struct e820_entry *entry;
+       struct resource *res;
+
+       if (!xen_initial_domain())
+               return;
+
+       xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
+       if (!xen_e820_table)
+               return;
+
+       memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
+       set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
+       rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
+       if (rc) {
+               pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
+               goto out;
+       }
+
+       last_guest_ram = 0;
+       for (i = 0; i < memmap.nr_entries; i++) {
+               if (xen_e820_table->entries[i].addr >= max_addr)
+                       break;
+               if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
+                       last_guest_ram = i;
+       }
+
+       entry = &xen_e820_table->entries[last_guest_ram];
+       if (max_addr >= entry->addr + entry->size)
+               goto out; /* No unallocated host RAM. */
+
+       hostmem_resource->start = max_addr;
+       hostmem_resource->end = entry->addr + entry->size;
+
+       /*
+        * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
+        * as unavailable. The rest of that region can be used for hotplug-based
+        * ballooning.
+        */
+       for (; i < memmap.nr_entries; i++) {
+               entry = &xen_e820_table->entries[i];
+
+               if (entry->type == E820_TYPE_RAM)
+                       continue;
+
+               if (entry->addr >= hostmem_resource->end)
+                       break;
+
+               res = kzalloc(sizeof(*res), GFP_KERNEL);
+               if (!res)
+                       goto out;
+
+               res->name = "Unavailable host RAM";
+               res->start = entry->addr;
+               res->end = (entry->addr + entry->size < hostmem_resource->end) ?
+                           entry->addr + entry->size : hostmem_resource->end;
+               rc = insert_resource(hostmem_resource, res);
+               if (rc) {
+                       pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
+                               __func__, res->start, res->end, rc);
+                       kfree(res);
+                       goto  out;
+               }
+       }
+
+ out:
+       kfree(xen_e820_table);
+}
+#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
index f2414c6..c047f42 100644 (file)
@@ -88,6 +88,8 @@
 #include "multicalls.h"
 #include "pmu.h"
 
+#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */
+
 void *xen_initial_gdt;
 
 static int xen_cpu_up_prepare_pv(unsigned int cpu);
@@ -826,7 +828,7 @@ static void xen_load_sp0(unsigned long sp0)
        mcs = xen_mc_entry(0);
        MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
        xen_mc_issue(PARAVIRT_LAZY_CPU);
-       this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+       this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 void xen_set_iopl_mask(unsigned mask)
@@ -1258,6 +1260,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
        __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
 
        /* Work out if we support NX */
+       get_cpu_cap(&boot_cpu_data);
        x86_configure_nx();
 
        /* Get mfn list */
index fc048ec..4d62c07 100644 (file)
@@ -1902,6 +1902,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        /* Graft it onto L4[511][510] */
        copy_page(level2_kernel_pgt, l2);
 
+       /*
+        * Zap execute permission from the ident map. Due to the sharing of
+        * L1 entries we need to do this in the L2.
+        */
+       if (__supported_pte_mask & _PAGE_NX) {
+               for (i = 0; i < PTRS_PER_PMD; ++i) {
+                       if (pmd_none(level2_ident_pgt[i]))
+                               continue;
+                       level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
+               }
+       }
+
        /* Copy the initial P->M table mappings if necessary. */
        i = pgd_index(xen_start_info->mfn_list);
        if (i && i < pgd_index(__START_KERNEL_map))
@@ -2261,7 +2273,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 
        switch (idx) {
        case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
-       case FIX_RO_IDT:
 #ifdef CONFIG_X86_32
        case FIX_WP_TEST:
 # ifdef CONFIG_HIGHMEM
@@ -2272,7 +2283,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
        case FIX_TEXT_POKE0:
        case FIX_TEXT_POKE1:
-       case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
                /* All local page mappings */
                pte = pfn_pte(phys, prot);
                break;
index c114ca7..6e0d208 100644 (file)
@@ -808,7 +808,6 @@ char * __init xen_memory_setup(void)
        addr = xen_e820_table.entries[0].addr;
        size = xen_e820_table.entries[0].size;
        while (i < xen_e820_table.nr_entries) {
-               bool discard = false;
 
                chunk_size = size;
                type = xen_e820_table.entries[i].type;
@@ -824,11 +823,10 @@ char * __init xen_memory_setup(void)
                                xen_add_extra_mem(pfn_s, n_pfns);
                                xen_max_p2m_pfn = pfn_s + n_pfns;
                        } else
-                               discard = true;
+                               type = E820_TYPE_UNUSABLE;
                }
 
-               if (!discard)
-                       xen_align_and_add_e820_region(addr, chunk_size, type);
+               xen_align_and_add_e820_region(addr, chunk_size, type);
 
                addr += chunk_size;
                size -= chunk_size;
index 8bfdea5..9ef6cf3 100644 (file)
@@ -599,6 +599,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
        bio->bi_disk = bio_src->bi_disk;
        bio->bi_partno = bio_src->bi_partno;
        bio_set_flag(bio, BIO_CLONED);
+       if (bio_flagged(bio_src, BIO_THROTTLED))
+               bio_set_flag(bio, BIO_THROTTLED);
        bio->bi_opf = bio_src->bi_opf;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_iter = bio_src->bi_iter;
index b21f8e8..d3a9471 100644 (file)
 #include "blk.h"
 
 /*
- * Append a bio to a passthrough request.  Only works can be merged into
- * the request based on the driver constraints.
+ * Append a bio to a passthrough request.  Only works if the bio can be merged
+ * into the request based on the driver constraints.
  */
-int blk_rq_append_bio(struct request *rq, struct bio *bio)
+int blk_rq_append_bio(struct request *rq, struct bio **bio)
 {
-       blk_queue_bounce(rq->q, &bio);
+       struct bio *orig_bio = *bio;
+
+       blk_queue_bounce(rq->q, bio);
 
        if (!rq->bio) {
-               blk_rq_bio_prep(rq->q, rq, bio);
+               blk_rq_bio_prep(rq->q, rq, *bio);
        } else {
-               if (!ll_back_merge_fn(rq->q, rq, bio))
+               if (!ll_back_merge_fn(rq->q, rq, *bio)) {
+                       if (orig_bio != *bio) {
+                               bio_put(*bio);
+                               *bio = orig_bio;
+                       }
                        return -EINVAL;
+               }
 
-               rq->biotail->bi_next = bio;
-               rq->biotail = bio;
-               rq->__data_len += bio->bi_iter.bi_size;
+               rq->biotail->bi_next = *bio;
+               rq->biotail = *bio;
+               rq->__data_len += (*bio)->bi_iter.bi_size;
        }
 
        return 0;
@@ -73,14 +80,12 @@ static int __blk_rq_map_user_iov(struct request *rq,
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
-       ret = blk_rq_append_bio(rq, bio);
-       bio_get(bio);
+       ret = blk_rq_append_bio(rq, &bio);
        if (ret) {
-               bio_endio(bio);
                __blk_rq_unmap_user(orig_bio);
-               bio_put(bio);
                return ret;
        }
+       bio_get(bio);
 
        return 0;
 }
@@ -213,7 +218,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
-       struct bio *bio;
+       struct bio *bio, *orig_bio;
        int ret;
 
        if (len > (queue_max_hw_sectors(q) << 9))
@@ -236,10 +241,11 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (do_copy)
                rq->rq_flags |= RQF_COPY_USER;
 
-       ret = blk_rq_append_bio(rq, bio);
+       orig_bio = bio;
+       ret = blk_rq_append_bio(rq, &bio);
        if (unlikely(ret)) {
                /* request is too big */
-               bio_put(bio);
+               bio_put(orig_bio);
                return ret;
        }
 
index 825bc29..d19f416 100644 (file)
@@ -2226,13 +2226,7 @@ again:
 out_unlock:
        spin_unlock_irq(q->queue_lock);
 out:
-       /*
-        * As multiple blk-throtls may stack in the same issue path, we
-        * don't want bios to leave with the flag set.  Clear the flag if
-        * being issued.
-        */
-       if (!throttled)
-               bio_clear_flag(bio, BIO_THROTTLED);
+       bio_set_flag(bio, BIO_THROTTLED);
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        if (throttled || !td->track_bio_latency)
index fceb1a9..1d05c42 100644 (file)
@@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        unsigned i = 0;
        bool bounce = false;
        int sectors = 0;
+       bool passthrough = bio_is_passthrough(*bio_orig);
 
        bio_for_each_segment(from, *bio_orig, iter) {
                if (i++ < BIO_MAX_PAGES)
@@ -210,13 +211,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        if (!bounce)
                return;
 
-       if (sectors < bio_sectors(*bio_orig)) {
+       if (!passthrough && sectors < bio_sectors(*bio_orig)) {
                bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
                bio_chain(bio, *bio_orig);
                generic_make_request(*bio_orig);
                *bio_orig = bio;
        }
-       bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);
+       bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
+                       bounce_bio_set);
 
        bio_for_each_segment_all(to, bio, i) {
                struct page *page = to->bv_page;
index b4df317..f95c607 100644 (file)
@@ -100,9 +100,13 @@ struct kyber_hctx_data {
        unsigned int cur_domain;
        unsigned int batching;
        wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
+       struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
        atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
 
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
+                            void *key);
+
 static int rq_sched_domain(const struct request *rq)
 {
        unsigned int op = rq->cmd_flags;
@@ -385,6 +389,9 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                INIT_LIST_HEAD(&khd->rqs[i]);
+               init_waitqueue_func_entry(&khd->domain_wait[i],
+                                         kyber_domain_wake);
+               khd->domain_wait[i].private = hctx;
                INIT_LIST_HEAD(&khd->domain_wait[i].entry);
                atomic_set(&khd->wait_index[i], 0);
        }
@@ -524,35 +531,39 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
        int nr;
 
        nr = __sbitmap_queue_get(domain_tokens);
-       if (nr >= 0)
-               return nr;
 
        /*
         * If we failed to get a domain token, make sure the hardware queue is
         * run when one becomes available. Note that this is serialized on
         * khd->lock, but we still need to be careful about the waker.
         */
-       if (list_empty_careful(&wait->entry)) {
-               init_waitqueue_func_entry(wait, kyber_domain_wake);
-               wait->private = hctx;
+       if (nr < 0 && list_empty_careful(&wait->entry)) {
                ws = sbq_wait_ptr(domain_tokens,
                                  &khd->wait_index[sched_domain]);
+               khd->domain_ws[sched_domain] = ws;
                add_wait_queue(&ws->wait, wait);
 
                /*
                 * Try again in case a token was freed before we got on the wait
-                * queue. The waker may have already removed the entry from the
-                * wait queue, but list_del_init() is okay with that.
+                * queue.
                 */
                nr = __sbitmap_queue_get(domain_tokens);
-               if (nr >= 0) {
-                       unsigned long flags;
+       }
 
-                       spin_lock_irqsave(&ws->wait.lock, flags);
-                       list_del_init(&wait->entry);
-                       spin_unlock_irqrestore(&ws->wait.lock, flags);
-               }
+       /*
+        * If we got a token while we were on the wait queue, remove ourselves
+        * from the wait queue to ensure that all wake ups make forward
+        * progress. It's possible that the waker already deleted the entry
+        * between the !list_empty_careful() check and us grabbing the lock, but
+        * list_del_init() is okay with that.
+        */
+       if (nr >= 0 && !list_empty_careful(&wait->entry)) {
+               ws = khd->domain_ws[sched_domain];
+               spin_lock_irq(&ws->wait.lock);
+               list_del_init(&wait->entry);
+               spin_unlock_irq(&ws->wait.lock);
        }
+
        return nr;
 }
 
index 415a54c..444a387 100644 (file)
@@ -1138,12 +1138,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
                if (!af_alg_readable(sk))
                        break;
 
-               if (!ctx->used) {
-                       err = af_alg_wait_for_data(sk, flags);
-                       if (err)
-                               return err;
-               }
-
                seglen = min_t(size_t, (maxsize - len),
                               msg_data_left(msg));
 
index 48b34e9..ddcc45f 100644 (file)
@@ -111,6 +111,12 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
        size_t usedpages = 0;           /* [in]  RX bufs to be used from user */
        size_t processed = 0;           /* [in]  TX bufs to be consumed */
 
+       if (!ctx->used) {
+               err = af_alg_wait_for_data(sk, flags);
+               if (err)
+                       return err;
+       }
+
        /*
         * Data length provided by caller via sendmsg/sendpage that has not
         * yet been processed.
@@ -285,6 +291,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                /* AIO operation */
                sock_hold(sk);
                areq->iocb = msg->msg_iocb;
+
+               /* Remember output size that will be generated. */
+               areq->outlen = outlen;
+
                aead_request_set_callback(&areq->cra_u.aead_req,
                                          CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          af_alg_async_cb, areq);
@@ -292,12 +302,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
                                 crypto_aead_decrypt(&areq->cra_u.aead_req);
 
                /* AIO operation in progress */
-               if (err == -EINPROGRESS || err == -EBUSY) {
-                       /* Remember output size that will be generated. */
-                       areq->outlen = outlen;
-
+               if (err == -EINPROGRESS || err == -EBUSY)
                        return -EIOCBQUEUED;
-               }
 
                sock_put(sk);
        } else {
index 30cff82..baef9bf 100644 (file)
@@ -72,6 +72,12 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
        int err = 0;
        size_t len = 0;
 
+       if (!ctx->used) {
+               err = af_alg_wait_for_data(sk, flags);
+               if (err)
+                       return err;
+       }
+
        /* Allocate cipher request for current operation. */
        areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
                                     crypto_skcipher_reqsize(tfm));
@@ -119,6 +125,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
                /* AIO operation */
                sock_hold(sk);
                areq->iocb = msg->msg_iocb;
+
+               /* Remember output size that will be generated. */
+               areq->outlen = len;
+
                skcipher_request_set_callback(&areq->cra_u.skcipher_req,
                                              CRYPTO_TFM_REQ_MAY_SLEEP,
                                              af_alg_async_cb, areq);
@@ -127,12 +137,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
                        crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
 
                /* AIO operation in progress */
-               if (err == -EINPROGRESS || err == -EBUSY) {
-                       /* Remember output size that will be generated. */
-                       areq->outlen = len;
-
+               if (err == -EINPROGRESS || err == -EBUSY)
                        return -EIOCBQUEUED;
-               }
 
                sock_put(sk);
        } else {
index 4e64726..eca04d3 100644 (file)
@@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
                pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+               spin_lock_init(&cpu_queue->q_lock);
        }
        return 0;
 }
@@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
        int cpu, err;
        struct mcryptd_cpu_queue *cpu_queue;
 
-       cpu = get_cpu();
-       cpu_queue = this_cpu_ptr(queue->cpu_queue);
-       rctx->tag.cpu = cpu;
+       cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+       spin_lock(&cpu_queue->q_lock);
+       cpu = smp_processor_id();
+       rctx->tag.cpu = smp_processor_id();
 
        err = crypto_enqueue_request(&cpu_queue->queue, request);
        pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
                 cpu, cpu_queue, request);
+       spin_unlock(&cpu_queue->q_lock);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-       put_cpu();
 
        return err;
 }
@@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
        cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
        i = 0;
        while (i < MCRYPTD_BATCH || single_task_running()) {
-               /*
-                * preempt_disable/enable is used to prevent
-                * being preempted by mcryptd_enqueue_request()
-                */
-               local_bh_disable();
-               preempt_disable();
+
+               spin_lock_bh(&cpu_queue->q_lock);
                backlog = crypto_get_backlog(&cpu_queue->queue);
                req = crypto_dequeue_request(&cpu_queue->queue);
-               preempt_enable();
-               local_bh_enable();
+               spin_unlock_bh(&cpu_queue->q_lock);
 
                if (!req) {
                        mcryptd_opportunistic_flush();
@@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
                ++i;
        }
        if (cpu_queue->queue.qlen)
-               queue_work(kcrypto_wq, &cpu_queue->work);
+               queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
 
 void mcryptd_flusher(struct work_struct *__work)
index 778e0ff..11af5fd 100644 (file)
@@ -449,6 +449,8 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 
        walk->total = req->cryptlen;
        walk->nbytes = 0;
+       walk->iv = req->iv;
+       walk->oiv = req->iv;
 
        if (unlikely(!walk->total))
                return 0;
@@ -456,9 +458,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);
 
-       walk->iv = req->iv;
-       walk->oiv = req->iv;
-
        walk->flags &= ~SKCIPHER_WALK_SLEEP;
        walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                       SKCIPHER_WALK_SLEEP : 0;
@@ -510,6 +509,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
        int err;
 
        walk->nbytes = 0;
+       walk->iv = req->iv;
+       walk->oiv = req->iv;
 
        if (unlikely(!walk->total))
                return 0;
@@ -525,9 +526,6 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
        scatterwalk_done(&walk->in, 0, walk->total);
        scatterwalk_done(&walk->out, 0, walk->total);
 
-       walk->iv = req->iv;
-       walk->oiv = req->iv;
-
        if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                walk->flags |= SKCIPHER_WALK_SLEEP;
        else
index 6742f6c..9bff853 100644 (file)
@@ -1007,7 +1007,7 @@ skip:
        /* The record may be cleared by others, try read next record */
        if (len == -ENOENT)
                goto skip;
-       else if (len < sizeof(*rcd)) {
+       else if (len < 0 || len < sizeof(*rcd)) {
                rc = -EIO;
                goto out;
        }
index 30e84cc..06ea474 100644 (file)
@@ -1171,7 +1171,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
        struct cpc_register_resource *desired_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
-       struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+       struct cppc_pcc_data *pcc_ss_data;
        int ret = 0;
 
        if (!cpc_desc || pcc_ss_id < 0) {
index ff2580e..abeb4df 100644 (file)
@@ -1670,6 +1670,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
                                dev_name(&adev_dimm->dev));
                return -ENXIO;
        }
+       /*
+        * Record nfit_mem for the notification path to track back to
+        * the nfit sysfs attributes for this dimm device object.
+        */
+       dev_set_drvdata(&adev_dimm->dev, nfit_mem);
 
        /*
         * Until standardization materializes we need to consider 4
@@ -1752,9 +1757,11 @@ static void shutdown_dimm_notify(void *data)
                        sysfs_put(nfit_mem->flags_attr);
                        nfit_mem->flags_attr = NULL;
                }
-               if (adev_dimm)
+               if (adev_dimm) {
                        acpi_remove_notify_handler(adev_dimm->handle,
                                        ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
+                       dev_set_drvdata(&adev_dimm->dev, NULL);
+               }
        }
        mutex_unlock(&acpi_desc->init_mutex);
 }
index ccb9975..ad0477a 100644 (file)
@@ -35,13 +35,13 @@ static inline u64 mb_per_tick(int mbps)
 struct nullb_cmd {
        struct list_head list;
        struct llist_node ll_list;
-       call_single_data_t csd;
+       struct __call_single_data csd;
        struct request *rq;
        struct bio *bio;
        unsigned int tag;
+       blk_status_t error;
        struct nullb_queue *nq;
        struct hrtimer timer;
-       blk_status_t error;
 };
 
 struct nullb_queue {
index 647d056..b56c11f 100644 (file)
@@ -220,7 +220,8 @@ static bool clk_core_is_enabled(struct clk_core *core)
 
        ret = core->ops->is_enabled(core->hw);
 done:
-       clk_pm_runtime_put(core);
+       if (core->dev)
+               pm_runtime_put(core->dev);
 
        return ret;
 }
@@ -1564,6 +1565,9 @@ static void clk_change_rate(struct clk_core *core)
                best_parent_rate = core->parent->rate;
        }
 
+       if (clk_pm_runtime_get(core))
+               return;
+
        if (core->flags & CLK_SET_RATE_UNGATE) {
                unsigned long flags;
 
@@ -1634,6 +1638,8 @@ static void clk_change_rate(struct clk_core *core)
        /* handle the new child who might not be in core->children yet */
        if (core->new_child)
                clk_change_rate(core->new_child);
+
+       clk_pm_runtime_put(core);
 }
 
 static int clk_core_set_rate_nolock(struct clk_core *core,
index a1a6342..f00d875 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
        return 0;
 }
 
+static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev,
+                                unsigned long id)
+{
+       sun9i_mmc_reset_assert(rcdev, id);
+       udelay(10);
+       sun9i_mmc_reset_deassert(rcdev, id);
+
+       return 0;
+}
+
 static const struct reset_control_ops sun9i_mmc_reset_ops = {
        .assert         = sun9i_mmc_reset_assert,
        .deassert       = sun9i_mmc_reset_deassert,
+       .reset          = sun9i_mmc_reset_reset,
 };
 
 static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
index 58d4f4e..ca38229 100644 (file)
@@ -22,6 +22,8 @@
 
 #include "cpufreq_governor.h"
 
+#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL      (2 * TICK_NSEC / NSEC_PER_USEC)
+
 static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
 
 static DEFINE_MUTEX(gov_dbs_data_mutex);
@@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
 {
        struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct policy_dbs_info *policy_dbs;
+       unsigned int sampling_interval;
        int ret;
-       ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
-       if (ret != 1)
+
+       ret = sscanf(buf, "%u", &sampling_interval);
+       if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
                return -EINVAL;
 
+       dbs_data->sampling_rate = sampling_interval;
+
        /*
         * We are operating under dbs_data->mutex and so the list and its
         * entries can't be freed concurrently.
@@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
        if (ret)
                goto free_policy_dbs_info;
 
-       dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);
+       /*
+        * The sampling interval should not be less than the transition latency
+        * of the CPU and it also cannot be too small for dbs_update() to work
+        * correctly.
+        */
+       dbs_data->sampling_rate = max_t(unsigned int,
+                                       CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
+                                       cpufreq_policy_transition_delay_us(policy));
 
        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;
index 628fe89..d9b2c2d 100644 (file)
@@ -226,17 +226,18 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
        val >>= OCOTP_CFG3_SPEED_SHIFT;
        val &= 0x3;
 
-       if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
-            of_machine_is_compatible("fsl,imx6q"))
-               if (dev_pm_opp_disable(dev, 1200000000))
-                       dev_warn(dev, "failed to disable 1.2GHz OPP\n");
        if (val < OCOTP_CFG3_SPEED_996MHZ)
                if (dev_pm_opp_disable(dev, 996000000))
                        dev_warn(dev, "failed to disable 996MHz OPP\n");
-       if (of_machine_is_compatible("fsl,imx6q")) {
+
+       if (of_machine_is_compatible("fsl,imx6q") ||
+           of_machine_is_compatible("fsl,imx6qp")) {
                if (val != OCOTP_CFG3_SPEED_852MHZ)
                        if (dev_pm_opp_disable(dev, 852000000))
                                dev_warn(dev, "failed to disable 852MHz OPP\n");
+               if (val != OCOTP_CFG3_SPEED_1P2GHZ)
+                       if (dev_pm_opp_disable(dev, 1200000000))
+                               dev_warn(dev, "failed to disable 1.2GHz OPP\n");
        }
        iounmap(base);
 put_node:
index fbab271..a861b5b 100644 (file)
@@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
                         unsigned long flags)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
-       struct data_chunk       *first = xt->sgl;
+       struct data_chunk       *first;
        struct at_desc          *desc = NULL;
        size_t                  xfer_count;
        unsigned int            dwidth;
@@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
        if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
                return NULL;
 
+       first = xt->sgl;
+
        dev_info(chan2dev(chan),
                 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
                __func__, &xt->src_start, &xt->dst_start, xt->numf,
index d50273f..afd5e10 100644 (file)
@@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
        ret = dma_async_device_register(dd);
        if (ret)
-               return ret;
+               goto err_clk;
 
        irq = platform_get_irq(pdev, 0);
        ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
 err_unregister:
        dma_async_device_unregister(dd);
+err_clk:
+       clk_disable_unprepare(dmadev->clk);
        return ret;
 }
 
index 47edc7f..ec5f9d2 100644 (file)
@@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
 #define PATTERN_COUNT_MASK     0x1f
 #define PATTERN_MEMSET_IDX     0x01
 
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+       bool                    done;
+       wait_queue_head_t       *wait;
+};
+
 struct dmatest_thread {
        struct list_head        node;
        struct dmatest_info     *info;
@@ -165,6 +171,8 @@ struct dmatest_thread {
        u8                      **dsts;
        u8                      **udsts;
        enum dma_transaction_type type;
+       wait_queue_head_t done_wait;
+       struct dmatest_done test_done;
        bool                    done;
 };
 
@@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
        return error_count;
 }
 
-/* poor man's completion - we want to use wait_event_freezable() on it */
-struct dmatest_done {
-       bool                    done;
-       wait_queue_head_t       *wait;
-};
 
 static void dmatest_callback(void *arg)
 {
        struct dmatest_done *done = arg;
-
-       done->done = true;
-       wake_up_all(done->wait);
+       struct dmatest_thread *thread =
+               container_of(arg, struct dmatest_thread, done_wait);
+       if (!thread->done) {
+               done->done = true;
+               wake_up_all(done->wait);
+       } else {
+               /*
+                * If thread->done, it means that this callback occurred
+                * after the parent thread has cleaned up. This can
+                * happen in the case that driver doesn't implement
+                * the terminate_all() functionality and a dma operation
+                * did not occur within the timeout period
+                */
+               WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
+       }
 }
 
 static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
  */
 static int dmatest_func(void *data)
 {
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
        struct dmatest_thread   *thread = data;
-       struct dmatest_done     done = { .wait = &done_wait };
+       struct dmatest_done     *done = &thread->test_done;
        struct dmatest_info     *info;
        struct dmatest_params   *params;
        struct dma_chan         *chan;
@@ -673,9 +687,9 @@ static int dmatest_func(void *data)
                        continue;
                }
 
-               done.done = false;
+               done->done = false;
                tx->callback = dmatest_callback;
-               tx->callback_param = &done;
+               tx->callback_param = done;
                cookie = tx->tx_submit(tx);
 
                if (dma_submit_error(cookie)) {
@@ -688,21 +702,12 @@ static int dmatest_func(void *data)
                }
                dma_async_issue_pending(chan);
 
-               wait_event_freezable_timeout(done_wait, done.done,
+               wait_event_freezable_timeout(thread->done_wait, done->done,
                                             msecs_to_jiffies(params->timeout));
 
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-               if (!done.done) {
-                       /*
-                        * We're leaving the timed out dma operation with
-                        * dangling pointer to done_wait.  To make this
-                        * correct, we'll need to allocate wait_done for
-                        * each test iteration and perform "who's gonna
-                        * free it this time?" dancing.  For now, just
-                        * leave it dangling.
-                        */
-                       WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+               if (!done->done) {
                        dmaengine_unmap_put(um);
                        result("test timed out", total_tests, src_off, dst_off,
                               len, 0);
@@ -789,7 +794,7 @@ err_thread_type:
                dmatest_KBs(runtime, total_len), ret);
 
        /* terminate all transfers on specified channels */
-       if (ret)
+       if (ret || failed_tests)
                dmaengine_terminate_all(chan);
 
        thread->done = true;
@@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
                thread->info = info;
                thread->chan = dtc->chan;
                thread->type = type;
+               thread->test_done.wait = &thread->done_wait;
+               init_waitqueue_head(&thread->done_wait);
                smp_wmb();
                thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
                                dma_chan_name(chan), op, i);
index 6775f2c..c756886 100644 (file)
@@ -863,11 +863,11 @@ static void fsl_edma_irq_exit(
        }
 }
 
-static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
 {
        int i;
 
-       for (i = 0; i < DMAMUX_NR; i++)
+       for (i = 0; i < nr_clocks; i++)
                clk_disable_unprepare(fsl_edma->muxclk[i]);
 }
 
@@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
 
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
                fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
-               if (IS_ERR(fsl_edma->muxbase[i]))
+               if (IS_ERR(fsl_edma->muxbase[i])) {
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
                        return PTR_ERR(fsl_edma->muxbase[i]);
+               }
 
                sprintf(clkname, "dmamux%d", i);
                fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
                if (IS_ERR(fsl_edma->muxclk[i])) {
                        dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
                        return PTR_ERR(fsl_edma->muxclk[i]);
                }
 
                ret = clk_prepare_enable(fsl_edma->muxclk[i]);
-               if (ret) {
-                       /* disable only clks which were enabled on error */
-                       for (; i >= 0; i--)
-                               clk_disable_unprepare(fsl_edma->muxclk[i]);
-
-                       dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
-                       return ret;
-               }
+               if (ret)
+                       /* on error: disable all previously enabled clks */
+                       fsl_disable_clocks(fsl_edma, i);
 
        }
 
@@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev,
                        "Can't register Freescale eDMA engine. (%d)\n", ret);
-               fsl_disable_clocks(fsl_edma);
+               fsl_disable_clocks(fsl_edma, DMAMUX_NR);
                return ret;
        }
 
@@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
                dev_err(&pdev->dev,
                        "Can't register Freescale eDMA of_dma. (%d)\n", ret);
                dma_async_device_unregister(&fsl_edma->dma_dev);
-               fsl_disable_clocks(fsl_edma);
+               fsl_disable_clocks(fsl_edma, DMAMUX_NR);
                return ret;
        }
 
@@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
        fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
        of_dma_controller_free(np);
        dma_async_device_unregister(&fsl_edma->dma_dev);
-       fsl_disable_clocks(fsl_edma);
+       fsl_disable_clocks(fsl_edma, DMAMUX_NR);
 
        return 0;
 }
index 2f31d3d..7792a91 100644 (file)
@@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(dev, "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
-               goto free_resources;
+               goto unmap_dma;
        }
 
 unmap_dma:
index 23e771d..e85903e 100644 (file)
@@ -103,8 +103,8 @@ static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
        struct gpio_reg *r = to_gpio_reg(gc);
        int irq = r->irqs[offset];
 
-       if (irq >= 0 && r->irq.domain)
-               irq = irq_find_mapping(r->irq.domain, irq);
+       if (irq >= 0 && r->irqdomain)
+               irq = irq_find_mapping(r->irqdomain, irq);
 
        return irq;
 }
index eb4528c..d6f3d9e 100644 (file)
@@ -1074,7 +1074,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
        }
 
        if (!chip->names)
-               devprop_gpiochip_set_names(chip);
+               devprop_gpiochip_set_names(chip, dev_fwnode(chip->parent));
 
        acpi_gpiochip_request_regions(acpi_gpio);
        acpi_gpiochip_scan_gpios(acpi_gpio);
index 27f383b..f748aa3 100644 (file)
 /**
  * devprop_gpiochip_set_names - Set GPIO line names using device properties
  * @chip: GPIO chip whose lines should be named, if possible
+ * @fwnode: Property Node containing the gpio-line-names property
  *
  * Looks for device property "gpio-line-names" and if it exists assigns
  * GPIO line names for the chip. The memory allocated for the assigned
  * names belong to the underlying firmware node and should not be released
  * by the caller.
  */
-void devprop_gpiochip_set_names(struct gpio_chip *chip)
+void devprop_gpiochip_set_names(struct gpio_chip *chip,
+                               const struct fwnode_handle *fwnode)
 {
        struct gpio_device *gdev = chip->gpiodev;
        const char **names;
        int ret, i;
 
-       if (!chip->parent) {
-               dev_warn(&gdev->dev, "GPIO chip parent is NULL\n");
-               return;
-       }
-
-       ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+       ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
                                                NULL, 0);
        if (ret < 0)
                return;
 
        if (ret != gdev->ngpio) {
-               dev_warn(chip->parent,
+               dev_warn(&gdev->dev,
                         "names %d do not match number of GPIOs %d\n", ret,
                         gdev->ngpio);
                return;
@@ -52,10 +49,10 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip)
        if (!names)
                return;
 
-       ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+       ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
                                                names, gdev->ngpio);
        if (ret < 0) {
-               dev_warn(chip->parent, "failed to read GPIO line names\n");
+               dev_warn(&gdev->dev, "failed to read GPIO line names\n");
                kfree(names);
                return;
        }
index e0d59e6..72a0695 100644 (file)
@@ -493,7 +493,8 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
        /* If the chip defines names itself, these take precedence */
        if (!chip->names)
-               devprop_gpiochip_set_names(chip);
+               devprop_gpiochip_set_names(chip,
+                                          of_fwnode_handle(chip->of_node));
 
        of_node_get(chip->of_node);
 
index af48322..6c44d16 100644 (file)
@@ -228,7 +228,8 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
        return desc - &desc->gdev->descs[0];
 }
 
-void devprop_gpiochip_set_names(struct gpio_chip *chip);
+void devprop_gpiochip_set_names(struct gpio_chip *chip,
+                               const struct fwnode_handle *fwnode);
 
 /* With descriptor prefix */
 
index da43813..5aeb5f8 100644 (file)
@@ -2467,7 +2467,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
                                  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
                                  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
                                  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
-                                 PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
+                                 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
                                  PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
                                  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
                amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
index f71fe6d..bb5fa89 100644 (file)
@@ -2336,7 +2336,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                       const struct dm_connector_state *dm_state)
 {
        struct drm_display_mode *preferred_mode = NULL;
-       const struct drm_connector *drm_connector;
+       struct drm_connector *drm_connector;
        struct dc_stream_state *stream = NULL;
        struct drm_display_mode mode = *drm_mode;
        bool native_mode_found = false;
@@ -2355,11 +2355,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        if (!aconnector->dc_sink) {
                /*
-                * Exclude MST from creating fake_sink
-                * TODO: need to enable MST into fake_sink feature
+                * Create dc_sink when necessary to MST
+                * Don't apply fake_sink to MST
                 */
-               if (aconnector->mst_port)
-                       goto stream_create_fail;
+               if (aconnector->mst_port) {
+                       dm_dp_mst_dc_sink_create(drm_connector);
+                       goto mst_dc_sink_create_done;
+               }
 
                if (create_fake_sink(aconnector))
                        goto stream_create_fail;
@@ -2410,6 +2412,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 stream_create_fail:
 dm_state_null:
 drm_connector_null:
+mst_dc_sink_create_done:
        return stream;
 }
 
index 117521c..0230250 100644 (file)
@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
        struct mutex hpd_lock;
 
        bool fake_enable;
+
+       bool mst_connected;
 };
 
 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
index f8efb98..638c2c2 100644 (file)
@@ -185,6 +185,42 @@ static int dm_connector_update_modes(struct drm_connector *connector,
        return ret;
 }
 
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+{
+       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+       struct edid *edid;
+       struct dc_sink *dc_sink;
+       struct dc_sink_init_data init_params = {
+                       .link = aconnector->dc_link,
+                       .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+       edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+       if (!edid) {
+               drm_mode_connector_update_edid_property(
+                       &aconnector->base,
+                       NULL);
+               return;
+       }
+
+       aconnector->edid = edid;
+
+       dc_sink = dc_link_add_remote_sink(
+               aconnector->dc_link,
+               (uint8_t *)aconnector->edid,
+               (aconnector->edid->extensions + 1) * EDID_LENGTH,
+               &init_params);
+
+       dc_sink->priv = aconnector;
+       aconnector->dc_sink = dc_sink;
+
+       amdgpu_dm_add_sink_to_freesync_module(
+                       connector, aconnector->edid);
+
+       drm_mode_connector_update_edid_property(
+                                       &aconnector->base, aconnector->edid);
+}
+
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -311,6 +347,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                        drm_mode_connector_set_path_property(connector, pathprop);
 
                        drm_connector_list_iter_end(&conn_iter);
+                       aconnector->mst_connected = true;
                        return &aconnector->base;
                }
        }
@@ -363,6 +400,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
         */
        amdgpu_dm_connector_funcs_reset(connector);
 
+       aconnector->mst_connected = true;
+
        DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
                        aconnector, connector->base.id, aconnector->mst_port);
 
@@ -394,6 +433,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        drm_mode_connector_update_edid_property(
                        &aconnector->base,
                        NULL);
+
+       aconnector->mst_connected = false;
 }
 
 static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -404,10 +445,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
        drm_kms_helper_hotplug_event(dev);
 }
 
+static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
+{
+       mutex_lock(&connector->dev->mode_config.mutex);
+       drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+       mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
 static void dm_dp_mst_register_connector(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
        if (adev->mode_info.rfbdev)
                drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -416,6 +465,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
 
        drm_connector_register(connector);
 
+       if (aconnector->mst_connected)
+               dm_dp_mst_link_status_reset(connector);
 }
 
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
index 2da851b..8cf51da 100644 (file)
@@ -31,5 +31,6 @@ struct amdgpu_dm_connector;
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
                                       struct amdgpu_dm_connector *aconnector);
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
 
 #endif
index 3dce35e..b142629 100644 (file)
@@ -900,6 +900,15 @@ bool dcn_validate_bandwidth(
                        v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
                        v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
                        v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+                       /*
+                        * Spreadsheet doesn't handle taps_c is one properly,
+                        * need to force Chroma to always be scaled to pass
+                        * bandwidth validation.
+                        */
+                       if (v->override_hta_pschroma[input_idx] == 1)
+                               v->override_hta_pschroma[input_idx] = 2;
+                       if (v->override_vta_pschroma[input_idx] == 1)
+                               v->override_vta_pschroma[input_idx] = 2;
                        v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
                }
                if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
index e27ed4a..42a111b 100644 (file)
@@ -1801,7 +1801,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
                link->link_enc->funcs->disable_output(link->link_enc, signal, link);
 }
 
-bool dp_active_dongle_validate_timing(
+static bool dp_active_dongle_validate_timing(
                const struct dc_crtc_timing *timing,
                const struct dc_dongle_caps *dongle_caps)
 {
@@ -1833,6 +1833,8 @@ bool dp_active_dongle_validate_timing(
        /* Check Color Depth and Pixel Clock */
        if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
                required_pix_clk /= 2;
+       else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+               required_pix_clk = required_pix_clk * 2 / 3;
 
        switch (timing->display_color_depth) {
        case COLOR_DEPTH_666:
index 07ff8d2..d844fad 100644 (file)
@@ -2866,16 +2866,19 @@ static void dce110_apply_ctx_for_surface(
                int num_planes,
                struct dc_state *context)
 {
-       int i, be_idx;
+       int i;
 
        if (num_planes == 0)
                return;
 
-       be_idx = -1;
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               if (stream == context->res_ctx.pipe_ctx[i].stream) {
-                       be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
-                       break;
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+               if (stream == pipe_ctx->stream) {
+                       if (!pipe_ctx->top_pipe &&
+                               (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+                               dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
                }
        }
 
@@ -2895,9 +2898,22 @@ static void dce110_apply_ctx_for_surface(
                                        context->stream_count);
 
                dce110_program_front_end_for_pipe(dc, pipe_ctx);
+
+               dc->hwss.update_plane_addr(dc, pipe_ctx);
+
                program_surface_visibility(dc, pipe_ctx);
 
        }
+
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+               if ((stream == pipe_ctx->stream) &&
+                       (!pipe_ctx->top_pipe) &&
+                       (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+                       dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+       }
 }
 
 static void dce110_power_down_fe(struct dc *dc, int fe_idx)
index 74e7c82..a9d55d0 100644 (file)
@@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps(
                        scl_data->taps.h_taps = 1;
                if (IDENTITY_RATIO(scl_data->ratios.vert))
                        scl_data->taps.v_taps = 1;
-               /*
-                * Spreadsheet doesn't handle taps_c is one properly,
-                * need to force Chroma to always be scaled to pass
-                * bandwidth validation.
-                */
+               if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+                       scl_data->taps.h_taps_c = 1;
+               if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+                       scl_data->taps.v_taps_c = 1;
        }
 
        return true;
index 59849f0..1402c0e 100644 (file)
@@ -220,17 +220,6 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
 
        mutex_lock(&dev->mode_config.idr_mutex);
 
-       /* Insert the new lessee into the tree */
-       id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
-       if (id < 0) {
-               error = id;
-               goto out_lessee;
-       }
-
-       lessee->lessee_id = id;
-       lessee->lessor = drm_master_get(lessor);
-       list_add_tail(&lessee->lessee_list, &lessor->lessees);
-
        idr_for_each_entry(leases, entry, object) {
                error = 0;
                if (!idr_find(&dev->mode_config.crtc_idr, object))
@@ -246,6 +235,17 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
                }
        }
 
+       /* Insert the new lessee into the tree */
+       id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
+       if (id < 0) {
+               error = id;
+               goto out_lessee;
+       }
+
+       lessee->lessee_id = id;
+       lessee->lessor = drm_master_get(lessor);
+       list_add_tail(&lessee->lessee_list, &lessor->lessees);
+
        /* Move the leases over */
        lessee->leases = *leases;
        DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
index 37a93cd..2c90519 100644 (file)
@@ -558,11 +558,10 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
 }
 
 /*
- * setplane_internal - setplane handler for internal callers
+ * __setplane_internal - setplane handler for internal callers
  *
- * Note that we assume an extra reference has already been taken on fb.  If the
- * update fails, this reference will be dropped before return; if it succeeds,
- * the previous framebuffer (if any) will be unreferenced instead.
+ * This function will take a reference on the new fb for the plane
+ * on success.
  *
  * src_{x,y,w,h} are provided in 16.16 fixed point format
  */
@@ -630,14 +629,12 @@ static int __setplane_internal(struct drm_plane *plane,
        if (!ret) {
                plane->crtc = crtc;
                plane->fb = fb;
-               fb = NULL;
+               drm_framebuffer_get(plane->fb);
        } else {
                plane->old_fb = NULL;
        }
 
 out:
-       if (fb)
-               drm_framebuffer_put(fb);
        if (plane->old_fb)
                drm_framebuffer_put(plane->old_fb);
        plane->old_fb = NULL;
@@ -685,6 +682,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
        struct drm_plane *plane;
        struct drm_crtc *crtc = NULL;
        struct drm_framebuffer *fb = NULL;
+       int ret;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
@@ -717,15 +715,16 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
                }
        }
 
-       /*
-        * setplane_internal will take care of deref'ing either the old or new
-        * framebuffer depending on success.
-        */
-       return setplane_internal(plane, crtc, fb,
-                                plane_req->crtc_x, plane_req->crtc_y,
-                                plane_req->crtc_w, plane_req->crtc_h,
-                                plane_req->src_x, plane_req->src_y,
-                                plane_req->src_w, plane_req->src_h);
+       ret = setplane_internal(plane, crtc, fb,
+                               plane_req->crtc_x, plane_req->crtc_y,
+                               plane_req->crtc_w, plane_req->crtc_h,
+                               plane_req->src_x, plane_req->src_y,
+                               plane_req->src_w, plane_req->src_h);
+
+       if (fb)
+               drm_framebuffer_put(fb);
+
+       return ret;
 }
 
 static int drm_mode_cursor_universal(struct drm_crtc *crtc,
@@ -788,13 +787,12 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
                src_h = fb->height << 16;
        }
 
-       /*
-        * setplane_internal will take care of deref'ing either the old or new
-        * framebuffer depending on success.
-        */
        ret = __setplane_internal(crtc->cursor, crtc, fb,
-                               crtc_x, crtc_y, crtc_w, crtc_h,
-                               0, 0, src_w, src_h, ctx);
+                                 crtc_x, crtc_y, crtc_w, crtc_h,
+                                 0, 0, src_w, src_h, ctx);
+
+       if (fb)
+               drm_framebuffer_put(fb);
 
        /* Update successful; save new cursor position, if necessary */
        if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
index f776fc1..cb4d09c 100644 (file)
@@ -369,40 +369,26 @@ static const struct file_operations drm_syncobj_file_fops = {
        .release = drm_syncobj_file_release,
 };
 
-static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
-{
-       struct file *file = anon_inode_getfile("syncobj_file",
-                                              &drm_syncobj_file_fops,
-                                              syncobj, 0);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       drm_syncobj_get(syncobj);
-       if (cmpxchg(&syncobj->file, NULL, file)) {
-               /* lost the race */
-               fput(file);
-       }
-
-       return 0;
-}
-
 int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
 {
-       int ret;
+       struct file *file;
        int fd;
 
        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;
 
-       if (!syncobj->file) {
-               ret = drm_syncobj_alloc_file(syncobj);
-               if (ret) {
-                       put_unused_fd(fd);
-                       return ret;
-               }
+       file = anon_inode_getfile("syncobj_file",
+                                 &drm_syncobj_file_fops,
+                                 syncobj, 0);
+       if (IS_ERR(file)) {
+               put_unused_fd(fd);
+               return PTR_ERR(file);
        }
-       fd_install(fd, syncobj->file);
+
+       drm_syncobj_get(syncobj);
+       fd_install(fd, file);
+
        *p_fd = fd;
        return 0;
 }
@@ -422,31 +408,24 @@ static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
        return ret;
 }
 
-static struct drm_syncobj *drm_syncobj_fdget(int fd)
-{
-       struct file *file = fget(fd);
-
-       if (!file)
-               return NULL;
-       if (file->f_op != &drm_syncobj_file_fops)
-               goto err;
-
-       return file->private_data;
-err:
-       fput(file);
-       return NULL;
-};
-
 static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
                                    int fd, u32 *handle)
 {
-       struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
+       struct drm_syncobj *syncobj;
+       struct file *file;
        int ret;
 
-       if (!syncobj)
+       file = fget(fd);
+       if (!file)
                return -EINVAL;
 
+       if (file->f_op != &drm_syncobj_file_fops) {
+               fput(file);
+               return -EINVAL;
+       }
+
        /* take a reference to put in the idr */
+       syncobj = file->private_data;
        drm_syncobj_get(syncobj);
 
        idr_preload(GFP_KERNEL);
@@ -455,12 +434,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
        spin_unlock(&file_private->syncobj_table_lock);
        idr_preload_end();
 
-       if (ret < 0) {
-               fput(syncobj->file);
-               return ret;
-       }
-       *handle = ret;
-       return 0;
+       if (ret > 0) {
+               *handle = ret;
+               ret = 0;
+       } else
+               drm_syncobj_put(syncobj);
+
+       fput(file);
+       return ret;
 }
 
 static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
index 3551208..309f3fa 100644 (file)
@@ -266,6 +266,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
        /* Clear host CRT status, so guest couldn't detect this host CRT. */
        if (IS_BROADWELL(dev_priv))
                vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+
+       vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 }
 
 static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -282,7 +284,6 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
                                    int type, unsigned int resolution)
 {
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
        if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -308,7 +309,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
        port->type = type;
 
        emulate_monitor_status_change(vgpu);
-       vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+
        return 0;
 }
 
index ad4050f..18de656 100644 (file)
@@ -330,17 +330,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
         * must wait for all rendering to complete to the object (as unbinding
         * must anyway), and retire the requests.
         */
-       ret = i915_gem_object_wait(obj,
-                                  I915_WAIT_INTERRUPTIBLE |
-                                  I915_WAIT_LOCKED |
-                                  I915_WAIT_ALL,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  NULL);
+       ret = i915_gem_object_set_to_cpu_domain(obj, false);
        if (ret)
                return ret;
 
-       i915_gem_retire_requests(to_i915(obj->base.dev));
-
        while ((vma = list_first_entry_or_null(&obj->vma_list,
                                               struct i915_vma,
                                               obj_link))) {
index e8ca67a..ac236b8 100644 (file)
@@ -367,6 +367,7 @@ struct i915_sw_dma_fence_cb {
        struct dma_fence *dma;
        struct timer_list timer;
        struct irq_work work;
+       struct rcu_head rcu;
 };
 
 static void timer_i915_sw_fence_wake(struct timer_list *t)
@@ -406,7 +407,7 @@ static void irq_i915_sw_fence_work(struct irq_work *wrk)
        del_timer_sync(&cb->timer);
        dma_fence_put(cb->dma);
 
-       kfree(cb);
+       kfree_rcu(cb, rcu);
 }
 
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
index 5f8b9f1..bcbc7ab 100644 (file)
@@ -186,7 +186,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
        struct intel_wait *wait, *n, *first;
 
        if (!b->irq_armed)
-               return;
+               goto wakeup_signaler;
 
        /* We only disarm the irq when we are idle (all requests completed),
         * so if the bottom-half remains asleep, it missed the request
@@ -208,6 +208,14 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
        b->waiters = RB_ROOT;
 
        spin_unlock_irq(&b->rb_lock);
+
+       /*
+        * The signaling thread may be asleep holding a reference to a request,
+        * that had its signaling cancelled prior to being preempted. We need
+        * to kick the signaler, just in case, to release any such reference.
+        */
+wakeup_signaler:
+       wake_up_process(b->signaler);
 }
 
 static bool use_fake_irq(const struct intel_breadcrumbs *b)
@@ -651,23 +659,15 @@ static int intel_breadcrumbs_signaler(void *arg)
                }
 
                if (unlikely(do_schedule)) {
-                       DEFINE_WAIT(exec);
-
                        if (kthread_should_park())
                                kthread_parkme();
 
-                       if (kthread_should_stop()) {
-                               GEM_BUG_ON(request);
+                       if (unlikely(kthread_should_stop())) {
+                               i915_gem_request_put(request);
                                break;
                        }
 
-                       if (request)
-                               add_wait_queue(&request->execute, &exec);
-
                        schedule();
-
-                       if (request)
-                               remove_wait_queue(&request->execute, &exec);
                }
                i915_gem_request_put(request);
        } while (1);
index e0843bb..58a3755 100644 (file)
@@ -2128,6 +2128,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
        if (WARN_ON(!pll))
                return;
 
+        mutex_lock(&dev_priv->dpll_lock);
+
        if (IS_CANNONLAKE(dev_priv)) {
                /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
                val = I915_READ(DPCLKA_CFGCR0);
@@ -2157,6 +2159,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
        } else if (INTEL_INFO(dev_priv)->gen < 9) {
                I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
        }
+
+       mutex_unlock(&dev_priv->dpll_lock);
 }
 
 static void intel_ddi_clk_disable(struct intel_encoder *encoder)
index e8ccf89..30cf273 100644 (file)
@@ -9944,11 +9944,10 @@ found:
        }
 
        ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
+       drm_framebuffer_put(fb);
        if (ret)
                goto fail;
 
-       drm_framebuffer_put(fb);
-
        ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
        if (ret)
                goto fail;
@@ -13195,7 +13194,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
        primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
        primary->check_plane = intel_check_primary_plane;
 
-       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 10) {
                intel_primary_formats = skl_primary_formats;
                num_formats = ARRAY_SIZE(skl_primary_formats);
                modifiers = skl_format_modifiers_ccs;
index 3bf6528..5809b29 100644 (file)
@@ -193,7 +193,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
                };
 
                if (!pci_dev_present(atom_hdaudio_ids)) {
-                       DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n");
+                       DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
                        lpe_present = true;
                }
        }
index 2615912..435ff86 100644 (file)
@@ -224,7 +224,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
                /* Determine if we can get a cache-coherent map, forcing
                 * uncached mapping if we can't.
                 */
-               if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+               if (!nouveau_drm_use_coherent_gpu_mapping(drm))
                        nvbo->force_coherent = true;
        }
 
@@ -262,7 +262,8 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
                if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
                    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
                        continue;
-               if ((flags & TTM_PL_FLAG_TT  ) && !vmm->page[i].host)
+               if ((flags & TTM_PL_FLAG_TT) &&
+                   (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
                        continue;
 
                /* Select this page size if it's the first that supports
index 8d4a5be..56fe261 100644 (file)
@@ -152,9 +152,9 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
        work->cli = cli;
        mutex_lock(&cli->lock);
        list_add_tail(&work->head, &cli->worker);
-       mutex_unlock(&cli->lock);
        if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
                nouveau_cli_work_fence(fence, &work->cb);
+       mutex_unlock(&cli->lock);
 }
 
 static void
index 3331e82..96f6bd8 100644 (file)
@@ -157,8 +157,8 @@ struct nouveau_drm {
                struct nvif_object copy;
                int mtrr;
                int type_vram;
-               int type_host;
-               int type_ncoh;
+               int type_host[2];
+               int type_ncoh[2];
        } ttm;
 
        /* GEM interface support */
@@ -217,6 +217,13 @@ nouveau_drm(struct drm_device *dev)
        return dev->dev_private;
 }
 
+static inline bool
+nouveau_drm_use_coherent_gpu_mapping(struct nouveau_drm *drm)
+{
+       struct nvif_mmu *mmu = &drm->client.mmu;
+       return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED);
+}
+
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
 bool nouveau_pmops_runtime(void);
index c533d8e..be7357b 100644 (file)
@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
        drm_fb_helper_unregister_fbi(&fbcon->helper);
        drm_fb_helper_fini(&fbcon->helper);
 
-       if (nouveau_fb->nvbo) {
+       if (nouveau_fb && nouveau_fb->nvbo) {
                nouveau_vma_del(&nouveau_fb->vma);
                nouveau_bo_unmap(nouveau_fb->nvbo);
                nouveau_bo_unpin(nouveau_fb->nvbo);
index 589a962..c002f89 100644 (file)
@@ -103,10 +103,10 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
        u8 type;
        int ret;
 
-       if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
-               type = drm->ttm.type_ncoh;
+       if (!nouveau_drm_use_coherent_gpu_mapping(drm))
+               type = drm->ttm.type_ncoh[!!mem->kind];
        else
-               type = drm->ttm.type_host;
+               type = drm->ttm.type_host[0];
 
        if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
                mem->comp = mem->kind = 0;
index 08b974b..dff51a0 100644 (file)
@@ -235,27 +235,46 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
        drm->ttm.mem_global_ref.release = NULL;
 }
 
-int
-nouveau_ttm_init(struct nouveau_drm *drm)
+static int
+nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
 {
-       struct nvkm_device *device = nvxx_device(&drm->client.device);
-       struct nvkm_pci *pci = device->pci;
        struct nvif_mmu *mmu = &drm->client.mmu;
-       struct drm_device *dev = drm->dev;
-       int typei, ret;
+       int typei;
 
        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
-                                                  NVIF_MEM_COHERENT);
+                                           kind | NVIF_MEM_COHERENT);
        if (typei < 0)
                return -ENOSYS;
 
-       drm->ttm.type_host = typei;
+       drm->ttm.type_host[!!kind] = typei;
 
-       typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
+       typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
        if (typei < 0)
                return -ENOSYS;
 
-       drm->ttm.type_ncoh = typei;
+       drm->ttm.type_ncoh[!!kind] = typei;
+       return 0;
+}
+
+int
+nouveau_ttm_init(struct nouveau_drm *drm)
+{
+       struct nvkm_device *device = nvxx_device(&drm->client.device);
+       struct nvkm_pci *pci = device->pci;
+       struct nvif_mmu *mmu = &drm->client.mmu;
+       struct drm_device *dev = drm->dev;
+       int typei, ret;
+
+       ret = nouveau_ttm_init_host(drm, 0);
+       if (ret)
+               return ret;
+
+       if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+           drm->client.device.info.chipset != 0x50) {
+               ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
+               if (ret)
+                       return ret;
+       }
 
        if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
            drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
index 9e2628d..f5371d9 100644 (file)
@@ -67,8 +67,8 @@ nouveau_vma_del(struct nouveau_vma **pvma)
                        nvif_vmm_put(&vma->vmm->vmm, &tmp);
                }
                list_del(&vma->head);
-               *pvma = NULL;
                kfree(*pvma);
+               *pvma = NULL;
        }
 }
 
index e146436..00eeaaf 100644 (file)
@@ -2369,7 +2369,7 @@ nv13b_chipset = {
        .imem = gk20a_instmem_new,
        .ltc = gp100_ltc_new,
        .mc = gp10b_mc_new,
-       .mmu = gf100_mmu_new,
+       .mmu = gp10b_mmu_new,
        .secboot = gp10b_secboot_new,
        .pmu = gm20b_pmu_new,
        .timer = gk20a_timer_new,
index 972370e..7c7efa4 100644 (file)
@@ -36,6 +36,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
                        if (data) {
                                *ver = nvbios_rd08(bios, data + 0x00);
                                switch (*ver) {
+                               case 0x20:
                                case 0x21:
                                case 0x30:
                                case 0x40:
@@ -63,6 +64,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
        if (data && idx < *cnt) {
                u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
                switch (*ver * !!outp) {
+               case 0x20:
                case 0x21:
                case 0x30:
                        *hdr = nvbios_rd08(bios, data + 0x04);
@@ -96,12 +98,16 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
                info->type = nvbios_rd16(bios, data + 0x00);
                info->mask = nvbios_rd16(bios, data + 0x02);
                switch (*ver) {
+               case 0x20:
+                       info->mask |= 0x00c0; /* match any link */
+                       /* fall-through */
                case 0x21:
                case 0x30:
                        info->flags     = nvbios_rd08(bios, data + 0x05);
                        info->script[0] = nvbios_rd16(bios, data + 0x06);
                        info->script[1] = nvbios_rd16(bios, data + 0x08);
-                       info->lnkcmp    = nvbios_rd16(bios, data + 0x0a);
+                       if (*len >= 0x0c)
+                               info->lnkcmp    = nvbios_rd16(bios, data + 0x0a);
                        if (*len >= 0x0f) {
                                info->script[2] = nvbios_rd16(bios, data + 0x0c);
                                info->script[3] = nvbios_rd16(bios, data + 0x0e);
@@ -170,6 +176,7 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
        memset(info, 0x00, sizeof(*info));
        if (data) {
                switch (*ver) {
+               case 0x20:
                case 0x21:
                        info->dc    = nvbios_rd08(bios, data + 0x02);
                        info->pe    = nvbios_rd08(bios, data + 0x03);
index 1ba7289..db48a1d 100644 (file)
@@ -249,7 +249,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
                        iobj->base.memory.ptrs = &nv50_instobj_fast;
                else
                        iobj->base.memory.ptrs = &nv50_instobj_slow;
-               refcount_inc(&iobj->maps);
+               refcount_set(&iobj->maps, 1);
        }
 
        mutex_unlock(&imem->subdev.mutex);
index b1b1f36..deb96de 100644 (file)
@@ -136,6 +136,13 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
                return ret;
 
        pci->irq = pdev->irq;
+
+       /* Ensure MSI interrupts are armed, for the case where there are
+        * already interrupts pending (for whatever reason) at load time.
+        */
+       if (pci->msi)
+               pci->func->msi_rearm(pci);
+
        return ret;
 }
 
index dda904e..500b6fb 100644 (file)
@@ -175,11 +175,31 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
        writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG);
 }
 
+static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
+                                       const struct drm_display_mode *mode)
+{
+       struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
+       unsigned long rate = mode->clock * 1000;
+       unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */
+       long rounded_rate;
+
+       /* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */
+       if (rate > 165000000)
+               return MODE_CLOCK_HIGH;
+       rounded_rate = clk_round_rate(hdmi->tmds_clk, rate);
+       if (rounded_rate > 0 &&
+           max_t(unsigned long, rounded_rate, rate) -
+           min_t(unsigned long, rounded_rate, rate) < diff)
+               return MODE_OK;
+       return MODE_NOCLOCK;
+}
+
 static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
        .atomic_check   = sun4i_hdmi_atomic_check,
        .disable        = sun4i_hdmi_disable,
        .enable         = sun4i_hdmi_enable,
        .mode_set       = sun4i_hdmi_mode_set,
+       .mode_valid     = sun4i_hdmi_mode_valid,
 };
 
 static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
index e122f5b..f4284b5 100644 (file)
@@ -724,12 +724,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
        if (IS_ERR(tcon->crtc)) {
                dev_err(dev, "Couldn't create our CRTC\n");
                ret = PTR_ERR(tcon->crtc);
-               goto err_free_clocks;
+               goto err_free_dotclock;
        }
 
        ret = sun4i_rgb_init(drm, tcon);
        if (ret < 0)
-               goto err_free_clocks;
+               goto err_free_dotclock;
 
        if (tcon->quirks->needs_de_be_mux) {
                /*
index 44343a2..b5ba644 100644 (file)
@@ -455,6 +455,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                freed += (nr_free_pool - shrink_pages) << pool->order;
                if (freed >= sc->nr_to_scan)
                        break;
+               shrink_pages <<= pool->order;
        }
        mutex_unlock(&lock);
        return freed;
@@ -543,7 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
        int r = 0;
        unsigned i, j, cpages;
        unsigned npages = 1 << order;
-       unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
+       unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
 
        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
index c9790e2..af51230 100644 (file)
@@ -143,6 +143,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
                                    struct hwmon_device *hwdev, int index)
 {
        struct hwmon_thermal_data *tdata;
+       struct thermal_zone_device *tzd;
 
        tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL);
        if (!tdata)
@@ -151,8 +152,14 @@ static int hwmon_thermal_add_sensor(struct device *dev,
        tdata->hwdev = hwdev;
        tdata->index = index;
 
-       devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
-                                            &hwmon_thermal_ops);
+       tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+                                                  &hwmon_thermal_ops);
+       /*
+        * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
+        * so ignore that error but forward any other error.
+        */
+       if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
+               return PTR_ERR(tzd);
 
        return 0;
 }
@@ -621,14 +628,20 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
                                if (!chip->ops->is_visible(drvdata, hwmon_temp,
                                                           hwmon_temp_input, j))
                                        continue;
-                               if (info[i]->config[j] & HWMON_T_INPUT)
-                                       hwmon_thermal_add_sensor(dev, hwdev, j);
+                               if (info[i]->config[j] & HWMON_T_INPUT) {
+                                       err = hwmon_thermal_add_sensor(dev,
+                                                               hwdev, j);
+                                       if (err)
+                                               goto free_device;
+                               }
                        }
                }
        }
 
        return hdev;
 
+free_device:
+       device_unregister(hdev);
 free_hwmon:
        kfree(hwdev);
 ida_remove:
index feafdb9..59b2f96 100644 (file)
@@ -386,6 +386,9 @@ int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
        if (ret)
                return ret;
 
+       if (!qp->qp_sec)
+               return 0;
+
        mutex_lock(&real_qp->qp_sec->mutex);
        ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
                                          qp->qp_sec);
index d0202bb..840b240 100644 (file)
@@ -2074,8 +2074,8 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
                return -EOPNOTSUPP;
 
        if (ucore->inlen > sizeof(cmd)) {
-               if (ib_is_udata_cleared(ucore, sizeof(cmd),
-                                       ucore->inlen - sizeof(cmd)))
+               if (!ib_is_udata_cleared(ucore, sizeof(cmd),
+                                        ucore->inlen - sizeof(cmd)))
                        return -EOPNOTSUPP;
        }
 
index 3fb8fb6..e36d27e 100644 (file)
@@ -1438,7 +1438,8 @@ int ib_close_qp(struct ib_qp *qp)
        spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
 
        atomic_dec(&real_qp->usecnt);
-       ib_close_shared_qp_security(qp->qp_sec);
+       if (qp->qp_sec)
+               ib_close_shared_qp_security(qp->qp_sec);
        kfree(qp);
 
        return 0;
index b7bfc53..6f2b261 100644 (file)
@@ -395,7 +395,7 @@ next_cqe:
 
 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
 {
-       if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) {
+       if (DRAIN_CQE(cqe)) {
                WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
                return 0;
        }
@@ -494,7 +494,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
        /*
         * Special cqe for drain WR completions...
         */
-       if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+       if (DRAIN_CQE(hw_cqe)) {
                *cookie = CQE_DRAIN_COOKIE(hw_cqe);
                *cqe = *hw_cqe;
                goto skip_cqe;
@@ -571,10 +571,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
-               if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+               if (unlikely(!CQE_STATUS(hw_cqe) &&
+                            CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
                        t4_set_wq_in_error(wq);
-                       hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
-                       goto proc_cqe;
+                       hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
                }
                goto proc_cqe;
        }
@@ -748,9 +748,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
-               case C4IW_DRAIN_OPCODE:
-                       wc->opcode = IB_WC_SEND;
-                       break;
                default:
                        pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
index 470f97a..65dd372 100644 (file)
@@ -693,8 +693,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
        return IB_QPS_ERR;
 }
 
-#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
-
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
index 38bddd0..d5c92fc 100644 (file)
@@ -790,21 +790,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
        return 0;
 }
 
-static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+static int ib_to_fw_opcode(int ib_opcode)
+{
+       int opcode;
+
+       switch (ib_opcode) {
+       case IB_WR_SEND_WITH_INV:
+               opcode = FW_RI_SEND_WITH_INV;
+               break;
+       case IB_WR_SEND:
+               opcode = FW_RI_SEND;
+               break;
+       case IB_WR_RDMA_WRITE:
+               opcode = FW_RI_RDMA_WRITE;
+               break;
+       case IB_WR_RDMA_READ:
+       case IB_WR_RDMA_READ_WITH_INV:
+               opcode = FW_RI_READ_REQ;
+               break;
+       case IB_WR_REG_MR:
+               opcode = FW_RI_FAST_REGISTER;
+               break;
+       case IB_WR_LOCAL_INV:
+               opcode = FW_RI_LOCAL_INV;
+               break;
+       default:
+               opcode = -EINVAL;
+       }
+       return opcode;
+}
+
+static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
 {
        struct t4_cqe cqe = {};
        struct c4iw_cq *schp;
        unsigned long flag;
        struct t4_cq *cq;
+       int opcode;
 
        schp = to_c4iw_cq(qhp->ibqp.send_cq);
        cq = &schp->cq;
 
+       opcode = ib_to_fw_opcode(wr->opcode);
+       if (opcode < 0)
+               return opcode;
+
        cqe.u.drain_cookie = wr->wr_id;
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
-                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_OPCODE_V(opcode) |
                                 CQE_TYPE_V(1) |
                                 CQE_SWCQE_V(1) |
+                                CQE_DRAIN_V(1) |
                                 CQE_QPID_V(qhp->wq.sq.qid));
 
        spin_lock_irqsave(&schp->lock, flag);
@@ -819,6 +855,23 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
                                           schp->ibcq.cq_context);
                spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
        }
+       return 0;
+}
+
+static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
+                               struct ib_send_wr **bad_wr)
+{
+       int ret = 0;
+
+       while (wr) {
+               ret = complete_sq_drain_wr(qhp, wr);
+               if (ret) {
+                       *bad_wr = wr;
+                       break;
+               }
+               wr = wr->next;
+       }
+       return ret;
 }
 
 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -833,9 +886,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
 
        cqe.u.drain_cookie = wr->wr_id;
        cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
-                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_OPCODE_V(FW_RI_SEND) |
                                 CQE_TYPE_V(0) |
                                 CQE_SWCQE_V(1) |
+                                CQE_DRAIN_V(1) |
                                 CQE_QPID_V(qhp->wq.sq.qid));
 
        spin_lock_irqsave(&rchp->lock, flag);
@@ -852,6 +906,14 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
        }
 }
 
+static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+       while (wr) {
+               complete_rq_drain_wr(qhp, wr);
+               wr = wr->next;
+       }
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
 {
@@ -875,7 +937,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         */
        if (qhp->wq.flushed) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               complete_sq_drain_wr(qhp, wr);
+               err = complete_sq_drain_wrs(qhp, wr, bad_wr);
                return err;
        }
        num_wrs = t4_sq_avail(&qhp->wq);
@@ -1023,7 +1085,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
         */
        if (qhp->wq.flushed) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               complete_rq_drain_wr(qhp, wr);
+               complete_rq_drain_wrs(qhp, wr);
                return err;
        }
        num_wrs = t4_rq_avail(&qhp->wq);
index e9ea942..79e8ee1 100644 (file)
@@ -197,6 +197,11 @@ struct t4_cqe {
 #define CQE_SWCQE_G(x)    ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
 #define CQE_SWCQE_V(x)   ((x)<<CQE_SWCQE_S)
 
+#define CQE_DRAIN_S       10
+#define CQE_DRAIN_M       0x1
+#define CQE_DRAIN_G(x)    ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
+#define CQE_DRAIN_V(x)   ((x)<<CQE_DRAIN_S)
+
 #define CQE_STATUS_S      5
 #define CQE_STATUS_M      0x1F
 #define CQE_STATUS_G(x)   ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
@@ -213,6 +218,7 @@ struct t4_cqe {
 #define CQE_OPCODE_V(x)   ((x)<<CQE_OPCODE_S)
 
 #define SW_CQE(x)         (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define DRAIN_CQE(x)      (CQE_DRAIN_G(be32_to_cpu((x)->header)))
 #define CQE_QPID(x)       (CQE_QPID_G(be32_to_cpu((x)->header)))
 #define CQE_TYPE(x)       (CQE_TYPE_G(be32_to_cpu((x)->header)))
 #define SQ_TYPE(x)       (CQE_TYPE((x)))
index 4a9b4d7..8ce9118 100644 (file)
@@ -1131,7 +1131,6 @@ struct hfi1_devdata {
        u16 pcie_lnkctl;
        u16 pcie_devctl2;
        u32 pci_msix0;
-       u32 pci_lnkctl3;
        u32 pci_tph2;
 
        /*
index 09e50fd..8c7e7a6 100644 (file)
@@ -411,15 +411,12 @@ int restore_pci_variables(struct hfi1_devdata *dd)
        if (ret)
                goto error;
 
-       ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
-                                    dd->pci_lnkctl3);
-       if (ret)
-               goto error;
-
-       ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
-       if (ret)
-               goto error;
-
+       if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+               ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+                                            dd->pci_tph2);
+               if (ret)
+                       goto error;
+       }
        return 0;
 
 error:
@@ -469,15 +466,12 @@ int save_pci_variables(struct hfi1_devdata *dd)
        if (ret)
                goto error;
 
-       ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
-                                   &dd->pci_lnkctl3);
-       if (ret)
-               goto error;
-
-       ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
-       if (ret)
-               goto error;
-
+       if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+               ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+                                           &dd->pci_tph2);
+               if (ret)
+                       goto error;
+       }
        return 0;
 
 error:
index 470995f..6f6712f 100644 (file)
@@ -47,17 +47,6 @@ int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
        return err;
 }
 
-int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
-                               bool reset, void *out, int out_size)
-{
-       u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
-
-       MLX5_SET(query_cong_statistics_in, in, opcode,
-                MLX5_CMD_OP_QUERY_CONG_STATISTICS);
-       MLX5_SET(query_cong_statistics_in, in, clear, reset);
-       return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
                               void *out, int out_size)
 {
index af4c245..78ffded 100644 (file)
@@ -37,8 +37,6 @@
 #include <linux/mlx5/driver.h>
 
 int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
-int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
-                               bool reset, void *out, int out_size);
 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
                               void *out, int out_size);
 int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
index 543d0a4..8ac50de 100644 (file)
@@ -1463,6 +1463,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        }
 
        INIT_LIST_HEAD(&context->vma_private_list);
+       mutex_init(&context->vma_private_list_mutex);
        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);
 
@@ -1624,7 +1625,9 @@ static void  mlx5_ib_vma_close(struct vm_area_struct *area)
         * mlx5_ib_disassociate_ucontext().
         */
        mlx5_ib_vma_priv_data->vma = NULL;
+       mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
        list_del(&mlx5_ib_vma_priv_data->list);
+       mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
        kfree(mlx5_ib_vma_priv_data);
 }
 
@@ -1644,10 +1647,13 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
                return -ENOMEM;
 
        vma_prv->vma = vma;
+       vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
        vma->vm_private_data = vma_prv;
        vma->vm_ops =  &mlx5_ib_vm_ops;
 
+       mutex_lock(&ctx->vma_private_list_mutex);
        list_add(&vma_prv->list, vma_head);
+       mutex_unlock(&ctx->vma_private_list_mutex);
 
        return 0;
 }
@@ -1690,6 +1696,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
         * mlx5_ib_vma_close.
         */
        down_write(&owning_mm->mmap_sem);
+       mutex_lock(&context->vma_private_list_mutex);
        list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
                                 list) {
                vma = vma_private->vma;
@@ -1704,6 +1711,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
                list_del(&vma_private->list);
                kfree(vma_private);
        }
+       mutex_unlock(&context->vma_private_list_mutex);
        up_write(&owning_mm->mmap_sem);
        mmput(owning_mm);
        put_task_struct(owning_process);
@@ -3737,34 +3745,6 @@ free:
        return ret;
 }
 
-static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev,
-                                      struct mlx5_ib_port *port,
-                                      struct rdma_hw_stats *stats)
-{
-       int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
-       void *out;
-       int ret, i;
-       int offset = port->cnts.num_q_counters;
-
-       out = kvzalloc(outlen, GFP_KERNEL);
-       if (!out)
-               return -ENOMEM;
-
-       ret = mlx5_cmd_query_cong_counter(dev->mdev, false, out, outlen);
-       if (ret)
-               goto free;
-
-       for (i = 0; i < port->cnts.num_cong_counters; i++) {
-               stats->value[i + offset] =
-                       be64_to_cpup((__be64 *)(out +
-                                    port->cnts.offsets[i + offset]));
-       }
-
-free:
-       kvfree(out);
-       return ret;
-}
-
 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
                                struct rdma_hw_stats *stats,
                                u8 port_num, int index)
@@ -3782,7 +3762,12 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
        num_counters = port->cnts.num_q_counters;
 
        if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
-               ret = mlx5_ib_query_cong_counters(dev, port, stats);
+               ret = mlx5_lag_query_cong_counters(dev->mdev,
+                                                  stats->value +
+                                                  port->cnts.num_q_counters,
+                                                  port->cnts.num_cong_counters,
+                                                  port->cnts.offsets +
+                                                  port->cnts.num_q_counters);
                if (ret)
                        return ret;
                num_counters += port->cnts.num_cong_counters;
index 6dd8cac..2c5f353 100644 (file)
@@ -115,6 +115,8 @@ enum {
 struct mlx5_ib_vma_private_data {
        struct list_head list;
        struct vm_area_struct *vma;
+       /* protect vma_private_list add/del */
+       struct mutex *vma_private_list_mutex;
 };
 
 struct mlx5_ib_ucontext {
@@ -129,6 +131,8 @@ struct mlx5_ib_ucontext {
        /* Transport Domain number */
        u32                     tdn;
        struct list_head        vma_private_list;
+       /* protect vma_private_list add/del */
+       struct mutex            vma_private_list_mutex;
 
        unsigned long           upd_xlt_page;
        /* protect ODP/KSM */
index ee0ee1f..d109fe8 100644 (file)
@@ -1637,6 +1637,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
        MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
        MLX5_SET(mkc, mkc, umr_en, 1);
 
+       mr->ibmr.device = pd->device;
        err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
        if (err)
                goto err_destroy_psv;
index 50812b3..a9c3378 100644 (file)
@@ -264,7 +264,7 @@ static int qedr_register_device(struct qedr_dev *dev)
 static int qedr_alloc_mem_sb(struct qedr_dev *dev,
                             struct qed_sb_info *sb_info, u16 sb_id)
 {
-       struct status_block *sb_virt;
+       struct status_block_e4 *sb_virt;
        dma_addr_t sb_phys;
        int rc;
 
index b7587f1..78b4900 100644 (file)
@@ -164,6 +164,13 @@ struct rdma_srq_sge {
        __le32 l_key;
 };
 
+/* Rdma doorbell data for flags update */
+struct rdma_pwm_flags_data {
+       __le16 icid; /* internal CID */
+       u8 agg_flags; /* aggregative flags */
+       u8 reserved;
+};
+
 /* Rdma doorbell data for SQ and RQ */
 struct rdma_pwm_val16_data {
        __le16 icid;
@@ -180,12 +187,16 @@ struct rdma_pwm_val32_data {
        __le16 icid;
        u8 agg_flags;
        u8 params;
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK    0x3
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT   0
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK  0x1
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
-#define RDMA_PWM_VAL32_DATA_RESERVED_MASK   0x1F
-#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT  3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK               0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT              0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK             0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT            2
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK    0x1
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT   3
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK           0x1
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT          4
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK              0x7
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT             5
        __le32 value;
 };
 
@@ -478,23 +489,25 @@ struct rdma_sq_fmr_wqe {
        __le16 dif_app_tag_mask;
        __le16 dif_runt_crc_value;
        __le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT   0
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK          0x1
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT         1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK      0x1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT     2
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK  0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT   4
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT   5
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK            0x1
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT           6
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK               0x1FF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT              7
-       __le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK      0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT     0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK            0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT           1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK                0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT       2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK    0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT   3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK      0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT     4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK      0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT     5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK              0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT             6
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK      0x1
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT     7
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK                 0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT                        8
+       __le32 reserved5;
 };
 
 /* First element (16 bytes) of fmr wqe */
@@ -558,23 +571,25 @@ struct rdma_sq_fmr_wqe_3rd {
        __le16 dif_app_tag_mask;
        __le16 dif_runt_crc_value;
        __le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT   0
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK          0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT         1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK      0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT     2
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK  0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT   4
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT   5
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK            0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT           6
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK               0x1FF
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT              7
-       __le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK          0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT         0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK                        0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT               1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK            0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT           2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK                0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT       3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK          0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT         4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK          0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT         5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK                  0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT                 6
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK          0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT         7
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK                     0xFF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT                    8
+       __le32 reserved5;
 };
 
 struct rdma_sq_local_inv_wqe {
@@ -606,20 +621,22 @@ struct rdma_sq_rdma_wqe {
        __le32 xrc_srq;
        u8 req_type;
        u8 flags;
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK                  0x1
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT                 0
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK              0x1
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT             1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK             0x1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT            2
-#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK                    0x1
-#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT                   3
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK                0x1
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT               4
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK           0x1
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT          5
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                 0x3
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT                6
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK         0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT                0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK     0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT    1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK    0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT   2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK           0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT          3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK       0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT      4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK  0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK     0x1
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT    6
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT       7
        u8 wqe_size;
        u8 prev_wqe_size;
        struct regpair remote_va;
index 63bc2ef..4f7bd3b 100644 (file)
@@ -94,7 +94,7 @@ struct pvrdma_cq {
        u32 cq_handle;
        bool is_kernel;
        atomic_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
 
 struct pvrdma_id_table {
@@ -175,7 +175,7 @@ struct pvrdma_srq {
        u32 srq_handle;
        int npages;
        refcount_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
 
 struct pvrdma_qp {
@@ -197,7 +197,7 @@ struct pvrdma_qp {
        bool is_kernel;
        struct mutex mutex; /* QP state mutex. */
        atomic_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
 
 struct pvrdma_dev {
index 3562c0c..e529622 100644 (file)
@@ -179,7 +179,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
                pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
 
        atomic_set(&cq->refcnt, 1);
-       init_waitqueue_head(&cq->wait);
+       init_completion(&cq->free);
        spin_lock_init(&cq->cq_lock);
 
        memset(cmd, 0, sizeof(*cmd));
@@ -230,8 +230,9 @@ err_cq:
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-       atomic_dec(&cq->refcnt);
-       wait_event(cq->wait, !atomic_read(&cq->refcnt));
+       if (atomic_dec_and_test(&cq->refcnt))
+               complete(&cq->free);
+       wait_for_completion(&cq->free);
 
        if (!cq->is_kernel)
                ib_umem_release(cq->umem);
index 1f4e187..e926818 100644 (file)
@@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
                ibqp->event_handler(&e, ibqp->qp_context);
        }
        if (qp) {
-               atomic_dec(&qp->refcnt);
-               if (atomic_read(&qp->refcnt) == 0)
-                       wake_up(&qp->wait);
+               if (atomic_dec_and_test(&qp->refcnt))
+                       complete(&qp->free);
        }
 }
 
@@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
                ibcq->event_handler(&e, ibcq->cq_context);
        }
        if (cq) {
-               atomic_dec(&cq->refcnt);
-               if (atomic_read(&cq->refcnt) == 0)
-                       wake_up(&cq->wait);
+               if (atomic_dec_and_test(&cq->refcnt))
+                       complete(&cq->free);
        }
 }
 
@@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
        }
        if (srq) {
                if (refcount_dec_and_test(&srq->refcnt))
-                       wake_up(&srq->wait);
+                       complete(&srq->free);
        }
 }
 
@@ -539,9 +537,8 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
                if (cq && cq->ibcq.comp_handler)
                        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
                if (cq) {
-                       atomic_dec(&cq->refcnt);
-                       if (atomic_read(&cq->refcnt))
-                               wake_up(&cq->wait);
+                       if (atomic_dec_and_test(&cq->refcnt))
+                               complete(&cq->free);
                }
                pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
        }
index 10420a1..4059308 100644 (file)
@@ -246,7 +246,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
                spin_lock_init(&qp->rq.lock);
                mutex_init(&qp->mutex);
                atomic_set(&qp->refcnt, 1);
-               init_waitqueue_head(&qp->wait);
+               init_completion(&qp->free);
 
                qp->state = IB_QPS_RESET;
 
@@ -428,8 +428,16 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
 
        pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-       atomic_dec(&qp->refcnt);
-       wait_event(qp->wait, !atomic_read(&qp->refcnt));
+       if (atomic_dec_and_test(&qp->refcnt))
+               complete(&qp->free);
+       wait_for_completion(&qp->free);
+
+       if (!qp->is_kernel) {
+               if (qp->rumem)
+                       ib_umem_release(qp->rumem);
+               if (qp->sumem)
+                       ib_umem_release(qp->sumem);
+       }
 
        pvrdma_page_dir_cleanup(dev, &qp->pdir);
 
index 826ccb8..5acebb1 100644 (file)
@@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 
        spin_lock_init(&srq->lock);
        refcount_set(&srq->refcnt, 1);
-       init_waitqueue_head(&srq->wait);
+       init_completion(&srq->free);
 
        dev_dbg(&dev->pdev->dev,
                "create shared receive queue from user space\n");
@@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
        dev->srq_tbl[srq->srq_handle] = NULL;
        spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
-       refcount_dec(&srq->refcnt);
-       wait_event(srq->wait, !refcount_read(&srq->refcnt));
+       if (refcount_dec_and_test(&srq->refcnt))
+               complete(&srq->free);
+       wait_for_completion(&srq->free);
 
        /* There is no support for kernel clients, so this is safe. */
        ib_umem_release(srq->umem);
index 3b96cda..e6151a2 100644 (file)
@@ -1236,13 +1236,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
                ipoib_ib_dev_down(dev);
 
        if (level == IPOIB_FLUSH_HEAVY) {
-               rtnl_lock();
                if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                        ipoib_ib_dev_stop(dev);
 
-               result = ipoib_ib_dev_open(dev);
-               rtnl_unlock();
-               if (result)
+               if (ipoib_ib_dev_open(dev))
                        return;
 
                if (netif_queue_stopped(dev))
@@ -1282,7 +1279,9 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_heavy);
 
+       rtnl_lock();
        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
+       rtnl_unlock();
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
index fd83c7f..f3654fd 100644 (file)
@@ -186,7 +186,7 @@ void led_blink_set(struct led_classdev *led_cdev,
                   unsigned long *delay_on,
                   unsigned long *delay_off)
 {
-       del_timer_sync(&led_cdev->blink_timer);
+       led_stop_software_blink(led_cdev);
 
        clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
        clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
index c971407..59c82cd 100644 (file)
@@ -377,6 +377,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
        u8 *ptr;
        u8 *rx_buf;
        u8 sum;
+       u8 rx_byte;
        int ret = 0, final_ret;
 
        len = cros_ec_prepare_tx(ec_dev, ec_msg);
@@ -421,25 +422,22 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
        if (!ret) {
                /* Verify that EC can process command */
                for (i = 0; i < len; i++) {
-                       switch (rx_buf[i]) {
-                       case EC_SPI_PAST_END:
-                       case EC_SPI_RX_BAD_DATA:
-                       case EC_SPI_NOT_READY:
-                               ret = -EAGAIN;
-                               ec_msg->result = EC_RES_IN_PROGRESS;
-                       default:
+                       rx_byte = rx_buf[i];
+                       if (rx_byte == EC_SPI_PAST_END  ||
+                           rx_byte == EC_SPI_RX_BAD_DATA ||
+                           rx_byte == EC_SPI_NOT_READY) {
+                               ret = -EREMOTEIO;
                                break;
                        }
-                       if (ret)
-                               break;
                }
-               if (!ret)
-                       ret = cros_ec_spi_receive_packet(ec_dev,
-                                       ec_msg->insize + sizeof(*response));
-       } else {
-               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
        }
 
+       if (!ret)
+               ret = cros_ec_spi_receive_packet(ec_dev,
+                               ec_msg->insize + sizeof(*response));
+       else
+               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
        final_ret = terminate_request(ec_dev);
 
        spi_bus_unlock(ec_spi->spi->master);
@@ -508,6 +506,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
        int i, len;
        u8 *ptr;
        u8 *rx_buf;
+       u8 rx_byte;
        int sum;
        int ret = 0, final_ret;
 
@@ -544,25 +543,22 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
        if (!ret) {
                /* Verify that EC can process command */
                for (i = 0; i < len; i++) {
-                       switch (rx_buf[i]) {
-                       case EC_SPI_PAST_END:
-                       case EC_SPI_RX_BAD_DATA:
-                       case EC_SPI_NOT_READY:
-                               ret = -EAGAIN;
-                               ec_msg->result = EC_RES_IN_PROGRESS;
-                       default:
+                       rx_byte = rx_buf[i];
+                       if (rx_byte == EC_SPI_PAST_END  ||
+                           rx_byte == EC_SPI_RX_BAD_DATA ||
+                           rx_byte == EC_SPI_NOT_READY) {
+                               ret = -EREMOTEIO;
                                break;
                        }
-                       if (ret)
-                               break;
                }
-               if (!ret)
-                       ret = cros_ec_spi_receive_response(ec_dev,
-                                       ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
-       } else {
-               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
        }
 
+       if (!ret)
+               ret = cros_ec_spi_receive_response(ec_dev,
+                               ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
+       else
+               dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
        final_ret = terminate_request(ec_dev);
 
        spi_bus_unlock(ec_spi->spi->master);
@@ -667,6 +663,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
                           sizeof(struct ec_response_get_protocol_info);
        ec_dev->dout_size = sizeof(struct ec_host_request);
 
+       ec_spi->last_transfer_ns = ktime_get_ns();
 
        err = cros_ec_register(ec_dev);
        if (err) {
index da16bf4..dc94ffc 100644 (file)
@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
 EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
 
 static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
-                             struct device_node *node)
+                             struct device_node *parent)
 {
+       struct device_node *node;
+
        if (pdata && pdata->codec)
                return true;
 
-       if (of_find_node_by_name(node, "codec"))
+       node = of_get_child_by_name(parent, "codec");
+       if (node) {
+               of_node_put(node);
                return true;
+       }
 
        return false;
 }
index d66502d..dd19f17 100644 (file)
@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
 };
 
 
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
 {
-#ifdef CONFIG_OF
-       if (of_find_node_by_name(node, "vibra"))
+       struct device_node *node;
+
+       node = of_get_child_by_name(parent, "vibra");
+       if (node) {
+               of_node_put(node);
                return true;
-#endif
+       }
+
        return false;
 }
 
index eda38cb..41f2a9f 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/pci.h>
 #include <linux/mutex.h>
 #include <linux/miscdevice.h>
-#include <linux/pti.h>
+#include <linux/intel-pti.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
index f80e911..73b6055 100644 (file)
@@ -1114,7 +1114,7 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
        if (!ops->oobbuf)
                ops->ooblen = 0;
 
-       if (offs < 0 || offs + ops->len >= mtd->size)
+       if (offs < 0 || offs + ops->len > mtd->size)
                return -EINVAL;
 
        if (ops->ooblen) {
index e0eb51d..dd56a67 100644 (file)
@@ -1763,7 +1763,7 @@ try_dmaread:
                        err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
                                                              addr);
                        /* erased page bitflips corrected */
-                       if (err > 0)
+                       if (err >= 0)
                                return err;
                }
 
index 484f7fb..a8bde66 100644 (file)
@@ -253,9 +253,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
                goto out_ce;
        }
 
-       gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
-       if (IS_ERR(gpiomtd->nwp)) {
-               ret = PTR_ERR(gpiomtd->nwp);
+       gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+       if (IS_ERR(gpiomtd->ale)) {
+               ret = PTR_ERR(gpiomtd->ale);
                goto out_ce;
        }
 
index 50f8d4a..d4d824e 100644 (file)
@@ -1067,9 +1067,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                return ret;
        }
 
-       /* handle the block mark swapping */
-       block_mark_swapping(this, payload_virt, auxiliary_virt);
-
        /* Loop over status bytes, accumulating ECC status. */
        status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
 
@@ -1158,6 +1155,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                max_bitflips = max_t(unsigned int, max_bitflips, *status);
        }
 
+       /* handle the block mark swapping */
+       block_mark_swapping(this, buf, auxiliary_virt);
+
        if (oob_required) {
                /*
                 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
index 8a9b085..58c705f 100644 (file)
@@ -1431,13 +1431,9 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
 {
        u8 macaddr[ETH_ALEN];
        u8 *mac;
-       int i;
 
        if (newval->string) {
-               i = sscanf(newval->string, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-                          &macaddr[0], &macaddr[1], &macaddr[2],
-                          &macaddr[3], &macaddr[4], &macaddr[5]);
-               if (i != ETH_ALEN)
+               if (!mac_pton(newval->string, macaddr))
                        goto err;
                mac = macaddr;
        } else {
index f412aad..2dead7f 100644 (file)
@@ -249,7 +249,6 @@ static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
        return -EIO;
 }
 
-/* Wait a while until mask & reg == value. Otherwise return timeout. */
 static int lan9303_read_wait(struct lan9303 *chip, int offset, u32 mask)
 {
        int i;
@@ -480,7 +479,8 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip)
 {
        int reg;
 
-       /* depending on the 'phy_addr_sel_strap' setting, the three phys are
+       /* Calculate chip->phy_addr_base:
+        * Depending on the 'phy_addr_sel_strap' setting, the three phys are
         * using IDs 0-1-2 or IDs 1-2-3. We cannot read back the
         * 'phy_addr_sel_strap' setting directly, so we need a test, which
         * configuration is active:
@@ -495,13 +495,10 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip)
                return reg;
        }
 
-       if ((reg != 0) && (reg != 0xffff))
-               chip->phy_addr_sel_strap = 1;
-       else
-               chip->phy_addr_sel_strap = 0;
+       chip->phy_addr_base = reg != 0 && reg != 0xffff;
 
        dev_dbg(chip->dev, "Phy setup '%s' detected\n",
-               chip->phy_addr_sel_strap ? "1-2-3" : "0-1-2");
+               chip->phy_addr_base ? "1-2-3" : "0-1-2");
 
        return 0;
 }
@@ -541,20 +538,19 @@ lan9303_alr_cache_find_mac(struct lan9303 *chip, const u8 *mac_addr)
        return NULL;
 }
 
-/* Wait a while until mask & reg == value. Otherwise return timeout. */
-static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno,
-                               int mask, char value)
+static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, u32 mask)
 {
        int i;
 
-       for (i = 0; i < 0x1000; i++) {
+       for (i = 0; i < 25; i++) {
                u32 reg;
 
                lan9303_read_switch_reg(chip, regno, &reg);
-               if ((reg & mask) == value)
+               if (!(reg & mask))
                        return 0;
                usleep_range(1000, 2000);
        }
+
        return -ETIMEDOUT;
 }
 
@@ -564,8 +560,7 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
        lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_1, dat1);
        lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
                                 LAN9303_ALR_CMD_MAKE_ENTRY);
-       lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND,
-                            0);
+       lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND);
        lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
 
        return 0;
@@ -870,7 +865,7 @@ static int lan9303_check_device(struct lan9303 *chip)
        if ((reg >> 16) != LAN9303_CHIP_ID) {
                dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
                        reg >> 16);
-               return ret;
+               return -ENODEV;
        }
 
        /* The default state of the LAN9303 device is to forward packets between
@@ -1022,7 +1017,7 @@ static int lan9303_get_sset_count(struct dsa_switch *ds)
 static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
 {
        struct lan9303 *chip = ds->priv;
-       int phy_base = chip->phy_addr_sel_strap;
+       int phy_base = chip->phy_addr_base;
 
        if (phy == phy_base)
                return lan9303_virt_phy_reg_read(chip, regnum);
@@ -1036,7 +1031,7 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
                             u16 val)
 {
        struct lan9303 *chip = ds->priv;
-       int phy_base = chip->phy_addr_sel_strap;
+       int phy_base = chip->phy_addr_base;
 
        if (phy == phy_base)
                return lan9303_virt_phy_reg_write(chip, regnum, val);
@@ -1073,7 +1068,7 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
 
        res =  lan9303_phy_write(ds, port, MII_BMCR, ctl);
 
-       if (port == chip->phy_addr_sel_strap) {
+       if (port == chip->phy_addr_base) {
                /* Virtual Phy: Remove Turbo 200Mbit mode */
                lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
 
@@ -1097,8 +1092,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port,
        struct lan9303 *chip = ds->priv;
 
        lan9303_disable_processing_port(chip, port);
-       lan9303_phy_write(ds, chip->phy_addr_sel_strap + port,
-                         MII_BMCR, BMCR_PDOWN);
+       lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
 }
 
 static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
@@ -1286,13 +1280,16 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
 
 static int lan9303_register_switch(struct lan9303 *chip)
 {
+       int base;
+
        chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS);
        if (!chip->ds)
                return -ENOMEM;
 
        chip->ds->priv = chip;
        chip->ds->ops = &lan9303_switch_ops;
-       chip->ds->phys_mii_mask = chip->phy_addr_sel_strap ? 0xe : 0x7;
+       base = chip->phy_addr_base;
+       chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
 
        return dsa_register_switch(chip->ds);
 }
index c604213..d50519e 100644 (file)
@@ -170,6 +170,7 @@ source "drivers/net/ethernet/sis/Kconfig"
 source "drivers/net/ethernet/sfc/Kconfig"
 source "drivers/net/ethernet/sgi/Kconfig"
 source "drivers/net/ethernet/smsc/Kconfig"
+source "drivers/net/ethernet/socionext/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
 source "drivers/net/ethernet/sun/Kconfig"
 source "drivers/net/ethernet/tehuti/Kconfig"
index 39f6273..6cf5ade 100644 (file)
@@ -82,6 +82,7 @@ obj-$(CONFIG_SFC) += sfc/
 obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
 obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
+obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
 obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
 obj-$(CONFIG_NET_VENDOR_SUN) += sun/
 obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
index b11e573..ea149c1 100644 (file)
@@ -504,3 +504,14 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
 
        return 0;
 }
+
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
+{
+       struct ena_eth_io_rx_cdesc_base *cdesc;
+
+       cdesc = ena_com_get_next_rx_cdesc(io_cq);
+       if (cdesc)
+               return false;
+       else
+               return true;
+}
index bb53c3a..2f76572 100644 (file)
@@ -88,6 +88,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 
 int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
 
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+
 static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
                                       struct ena_eth_io_intr_reg *intr_reg)
 {
index 97c5a89..a6f2832 100644 (file)
@@ -158,6 +158,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
        ring->per_napi_packets = 0;
        ring->per_napi_bytes = 0;
        ring->cpu = 0;
+       ring->first_interrupt = false;
+       ring->no_interrupt_event_cnt = 0;
        u64_stats_init(&ring->syncp);
 }
 
@@ -1274,6 +1276,9 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
 {
        struct ena_napi *ena_napi = data;
 
+       ena_napi->tx_ring->first_interrupt = true;
+       ena_napi->rx_ring->first_interrupt = true;
+
        napi_schedule_irqoff(&ena_napi->napi);
 
        return IRQ_HANDLED;
@@ -2648,8 +2653,32 @@ static void ena_fw_reset_device(struct work_struct *work)
        rtnl_unlock();
 }
 
-static int check_missing_comp_in_queue(struct ena_adapter *adapter,
-                                      struct ena_ring *tx_ring)
+static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
+                                       struct ena_ring *rx_ring)
+{
+       if (likely(rx_ring->first_interrupt))
+               return 0;
+
+       if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
+               return 0;
+
+       rx_ring->no_interrupt_event_cnt++;
+
+       if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
+               netif_err(adapter, rx_err, adapter->netdev,
+                         "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
+                         rx_ring->qid);
+               adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+               smp_mb__before_atomic();
+               set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
+                                         struct ena_ring *tx_ring)
 {
        struct ena_tx_buffer *tx_buf;
        unsigned long last_jiffies;
@@ -2659,8 +2688,27 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
        for (i = 0; i < tx_ring->ring_size; i++) {
                tx_buf = &tx_ring->tx_buffer_info[i];
                last_jiffies = tx_buf->last_jiffies;
-               if (unlikely(last_jiffies &&
-                            time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) {
+
+               if (last_jiffies == 0)
+                       /* no pending Tx at this location */
+                       continue;
+
+               if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
+                            2 * adapter->missing_tx_completion_to))) {
+                       /* If after graceful period interrupt is still not
+                        * received, we schedule a reset
+                        */
+                       netif_err(adapter, tx_err, adapter->netdev,
+                                 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
+                                 tx_ring->qid);
+                       adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+                       smp_mb__before_atomic();
+                       set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+                       return -EIO;
+               }
+
+               if (unlikely(time_is_before_jiffies(last_jiffies +
+                               adapter->missing_tx_completion_to))) {
                        if (!tx_buf->print_once)
                                netif_notice(adapter, tx_err, adapter->netdev,
                                             "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
@@ -2689,9 +2737,10 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
        return rc;
 }
 
-static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+static void check_for_missing_completions(struct ena_adapter *adapter)
 {
        struct ena_ring *tx_ring;
+       struct ena_ring *rx_ring;
        int i, budget, rc;
 
        /* Make sure the driver doesn't turn the device in other process */
@@ -2710,8 +2759,13 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
 
        for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
                tx_ring = &adapter->tx_ring[i];
+               rx_ring = &adapter->rx_ring[i];
+
+               rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
+               if (unlikely(rc))
+                       return;
 
-               rc = check_missing_comp_in_queue(adapter, tx_ring);
+               rc = check_for_rx_interrupt_queue(adapter, rx_ring);
                if (unlikely(rc))
                        return;
 
@@ -2870,7 +2924,7 @@ static void ena_timer_service(struct timer_list *t)
 
        check_for_admin_com_state(adapter);
 
-       check_for_missing_tx_completions(adapter);
+       check_for_missing_completions(adapter);
 
        check_for_empty_rx_ring(adapter);
 
index 3bbc003..f1972b5 100644 (file)
@@ -44,7 +44,7 @@
 #include "ena_eth_com.h"
 
 #define DRV_MODULE_VER_MAJOR   1
-#define DRV_MODULE_VER_MINOR   3
+#define DRV_MODULE_VER_MINOR   5
 #define DRV_MODULE_VER_SUBMINOR 0
 
 #define DRV_MODULE_NAME                "ena"
  * We wait for 6 sec just to be on the safe side.
  */
 #define ENA_DEVICE_KALIVE_TIMEOUT      (6 * HZ)
+#define ENA_MAX_NO_INTERRUPT_ITERATIONS 3
 
 #define ENA_MMIO_DISABLE_REG_READ      BIT(0)
 
@@ -236,6 +237,9 @@ struct ena_ring {
        /* The maximum header length the device can handle */
        u8 tx_max_header_size;
 
+       bool first_interrupt;
+       u16 no_interrupt_event_cnt;
+
        /* cpu for TPH */
        int cpu;
         /* number of tx/rx_buffer_info's entries */
index 9aec43c..48ca97f 100644 (file)
@@ -60,6 +60,8 @@ enum ena_regs_reset_reason_types {
        ENA_REGS_RESET_USER_TRIGGER             = 12,
 
        ENA_REGS_RESET_GENERIC                  = 13,
+
+       ENA_REGS_RESET_MISS_INTERRUPT           = 14,
 };
 
 /* ena_registers offsets */
index a74a8fb..7a3ebfd 100644 (file)
@@ -2930,9 +2930,8 @@ void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
 {
        struct ethhdr *eth = (struct ethhdr *)skb->data;
-       unsigned char *buf = skb->data;
        unsigned char buffer[128];
-       unsigned int i, j;
+       unsigned int i;
 
        netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 
@@ -2943,22 +2942,13 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
        netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
        netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
 
-       for (i = 0, j = 0; i < skb->len;) {
-               j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
-                             buf[i++]);
-
-               if ((i % 32) == 0) {
-                       netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
-                       j = 0;
-               } else if ((i % 16) == 0) {
-                       buffer[j++] = ' ';
-                       buffer[j++] = ' ';
-               } else if ((i % 4) == 0) {
-                       buffer[j++] = ' ';
-               }
+       for (i = 0; i < skb->len; i += 32) {
+               unsigned int len = min(skb->len - i, 32U);
+
+               hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+                                  buffer, sizeof(buffer), false);
+               netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
        }
-       if (i % 32)
-               netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
 
        netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 }
index 3c63b16..d9efbc8 100644 (file)
@@ -159,6 +159,8 @@ struct arc_emac_priv {
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
+
+       unsigned int rx_missed_errors;
 };
 
 /**
index 3241af1..bd277b0 100644 (file)
@@ -26,6 +26,8 @@
 
 #include "emac.h"
 
+static void arc_emac_restart(struct net_device *ndev);
+
 /**
  * arc_emac_tx_avail - Return the number of available slots in the tx ring.
  * @priv: Pointer to ARC EMAC private data structure.
@@ -210,39 +212,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
                        continue;
                }
 
-               pktlen = info & LEN_MASK;
-               stats->rx_packets++;
-               stats->rx_bytes += pktlen;
-               skb = rx_buff->skb;
-               skb_put(skb, pktlen);
-               skb->dev = ndev;
-               skb->protocol = eth_type_trans(skb, ndev);
-
-               dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
-                                dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
-               /* Prepare the BD for next cycle */
-               rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
-                                                        EMAC_BUFFER_SIZE);
-               if (unlikely(!rx_buff->skb)) {
+               /* Prepare the BD for next cycle. netif_receive_skb()
+                * only if new skb was allocated and mapped to avoid holes
+                * in the RX fifo.
+                */
+               skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+               if (unlikely(!skb)) {
+                       if (net_ratelimit())
+                               netdev_err(ndev, "cannot allocate skb\n");
+                       /* Return ownership to EMAC */
+                       rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
                        stats->rx_errors++;
-                       /* Because receive_skb is below, increment rx_dropped */
                        stats->rx_dropped++;
                        continue;
                }
 
-               /* receive_skb only if new skb was allocated to avoid holes */
-               netif_receive_skb(skb);
-
-               addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+               addr = dma_map_single(&ndev->dev, (void *)skb->data,
                                      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
                if (dma_mapping_error(&ndev->dev, addr)) {
                        if (net_ratelimit())
-                               netdev_err(ndev, "cannot dma map\n");
-                       dev_kfree_skb(rx_buff->skb);
+                               netdev_err(ndev, "cannot map dma buffer\n");
+                       dev_kfree_skb(skb);
+                       /* Return ownership to EMAC */
+                       rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
                        stats->rx_errors++;
+                       stats->rx_dropped++;
                        continue;
                }
+
+               /* unmap previously mapped skb */
+               dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+                                dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+               pktlen = info & LEN_MASK;
+               stats->rx_packets++;
+               stats->rx_bytes += pktlen;
+               skb_put(rx_buff->skb, pktlen);
+               rx_buff->skb->dev = ndev;
+               rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+               netif_receive_skb(rx_buff->skb);
+
+               rx_buff->skb = skb;
                dma_unmap_addr_set(rx_buff, addr, addr);
                dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
 
@@ -259,6 +270,53 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 }
 
 /**
+ * arc_emac_rx_miss_handle - handle R_MISS register
+ * @ndev:      Pointer to the net_device structure.
+ */
+static void arc_emac_rx_miss_handle(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       unsigned int miss;
+
+       miss = arc_reg_get(priv, R_MISS);
+       if (miss) {
+               stats->rx_errors += miss;
+               stats->rx_missed_errors += miss;
+               priv->rx_missed_errors += miss;
+       }
+}
+
+/**
+ * arc_emac_rx_stall_check - check RX stall
+ * @ndev:      Pointer to the net_device structure.
+ * @budget:    How many BDs requested to process on 1 call.
+ * @work_done: How many BDs processed
+ *
+ * Under certain conditions the EMAC stops reception of incoming packets
+ * and continuously increments the R_MISS register instead of saving data
+ * into the provided buffer. This function detects that condition and
+ * restarts the EMAC.
+ */
+static void arc_emac_rx_stall_check(struct net_device *ndev,
+                                   int budget, unsigned int work_done)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct arc_emac_bd *rxbd;
+
+       if (work_done)
+               priv->rx_missed_errors = 0;
+
+       if (priv->rx_missed_errors && budget) {
+               rxbd = &priv->rxbd[priv->last_rx_bd];
+               if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
+                       arc_emac_restart(ndev);
+                       priv->rx_missed_errors = 0;
+               }
+       }
+}
+
+/**
  * arc_emac_poll - NAPI poll handler.
  * @napi:      Pointer to napi_struct structure.
  * @budget:    How many BDs to process on 1 call.
@@ -272,6 +330,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
        unsigned int work_done;
 
        arc_emac_tx_clean(ndev);
+       arc_emac_rx_miss_handle(ndev);
 
        work_done = arc_emac_rx(ndev, budget);
        if (work_done < budget) {
@@ -279,6 +338,8 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
                arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
        }
 
+       arc_emac_rx_stall_check(ndev, budget, work_done);
+
        return work_done;
 }
 
@@ -320,6 +381,8 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
                if (status & MSER_MASK) {
                        stats->rx_missed_errors += 0x100;
                        stats->rx_errors += 0x100;
+                       priv->rx_missed_errors += 0x100;
+                       napi_schedule(&priv->napi);
                }
 
                if (status & RXCR_MASK) {
@@ -732,6 +795,63 @@ static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 }
 
 
+/**
+ * arc_emac_restart - Restart EMAC
+ * @ndev:      Pointer to net_device structure.
+ *
+ * This function does a hardware reset of the EMAC in order to restore
+ * reception of network packets.
+ */
+static void arc_emac_restart(struct net_device *ndev)
+{
+       struct arc_emac_priv *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
+       int i;
+
+       if (net_ratelimit())
+               netdev_warn(ndev, "restarting stalled EMAC\n");
+
+       netif_stop_queue(ndev);
+
+       /* Disable interrupts */
+       arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+       /* Disable EMAC */
+       arc_reg_clr(priv, R_CTRL, EN_MASK);
+
+       /* Return the sk_buff to system */
+       arc_free_tx_queue(ndev);
+
+       /* Clean Tx BD's */
+       priv->txbd_curr = 0;
+       priv->txbd_dirty = 0;
+       memset(priv->txbd, 0, TX_RING_SZ);
+
+       for (i = 0; i < RX_BD_NUM; i++) {
+               struct arc_emac_bd *rxbd = &priv->rxbd[i];
+               unsigned int info = le32_to_cpu(rxbd->info);
+
+               if (!(info & FOR_EMAC)) {
+                       stats->rx_errors++;
+                       stats->rx_dropped++;
+               }
+               /* Return ownership to EMAC */
+               rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+       }
+       priv->last_rx_bd = 0;
+
+       /* Make sure info is visible to EMAC before enable */
+       wmb();
+
+       /* Enable interrupts */
+       arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+       /* Enable EMAC */
+       arc_reg_or(priv, R_CTRL, EN_MASK);
+
+       netif_start_queue(ndev);
+}
+
 static const struct net_device_ops arc_emac_netdev_ops = {
        .ndo_open               = arc_emac_open,
        .ndo_stop               = arc_emac_stop,
index 1fbbbab..14a59e5 100644 (file)
@@ -2128,27 +2128,25 @@ static int bcm_enetsw_open(struct net_device *dev)
 
        /* allocate rx dma ring */
        size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
-       p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+       p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
        if (!p) {
                dev_err(kdev, "cannot allocate rx ring %u\n", size);
                ret = -ENOMEM;
                goto out_freeirq_tx;
        }
 
-       memset(p, 0, size);
        priv->rx_desc_alloc_size = size;
        priv->rx_desc_cpu = p;
 
        /* allocate tx dma ring */
        size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
-       p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+       p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
        if (!p) {
                dev_err(kdev, "cannot allocate tx ring\n");
                ret = -ENOMEM;
                goto out_free_rx_ring;
        }
 
-       memset(p, 0, size);
        priv->tx_desc_alloc_size = size;
        priv->tx_desc_cpu = p;
 
index 087f01b..f15a8fc 100644 (file)
@@ -1216,18 +1216,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
                goto out;
        }
 
-       /* The Ethernet switch we are interfaced with needs packets to be at
-        * least 64 bytes (including FCS) otherwise they will be discarded when
-        * they enter the switch port logic. When Broadcom tags are enabled, we
-        * need to make sure that packets are at least 68 bytes
-        * (including FCS and tag) because the length verification is done after
-        * the Broadcom tag is stripped off the ingress packet.
-        */
-       if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
-               ret = NETDEV_TX_OK;
-               goto out;
-       }
-
        /* Insert TSB and checksum infos */
        if (priv->tsb_en) {
                skb = bcm_sysport_insert_tsb(skb, dev);
index 1d96cd5..8eef9fb 100644 (file)
@@ -128,8 +128,6 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
        dma_desc->ctl1 = cpu_to_le32(ctl1);
 }
 
-#define ENET_BRCM_TAG_LEN      4
-
 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
                                    struct bgmac_dma_ring *ring,
                                    struct sk_buff *skb)
@@ -142,18 +140,6 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
        u32 flags;
        int i;
 
-       /* The Ethernet switch we are interfaced with needs packets to be at
-        * least 64 bytes (including FCS) otherwise they will be discarded when
-        * they enter the switch port logic. When Broadcom tags are enabled, we
-        * need to make sure that packets are at least 68 bytes
-        * (including FCS and tag) because the length verification is done after
-        * the Broadcom tag is stripped off the ingress packet.
-        */
-       if (netdev_uses_dsa(net_dev)) {
-               if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN))
-                       goto err_stats;
-       }
-
        if (skb->len > BGMAC_DESC_CTL1_LEN) {
                netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
                goto err_drop;
@@ -240,7 +226,6 @@ err_dma_head:
 
 err_drop:
        dev_kfree_skb(skb);
-err_stats:
        net_dev->stats.tx_dropped++;
        net_dev->stats.tx_errors++;
        return NETDEV_TX_OK;
index 01b7f2f..57eb26d 100644 (file)
@@ -3029,7 +3029,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 
        del_timer_sync(&bp->timer);
 
-       if (IS_PF(bp)) {
+       if (IS_PF(bp) && !BP_NOMCP(bp)) {
                /* Set ALWAYS_ALIVE bit in shmem */
                bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
                bnx2x_drv_pulse(bp);
@@ -3115,7 +3115,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
        bp->cnic_loaded = false;
 
        /* Clear driver version indication in shmem */
-       if (IS_PF(bp))
+       if (IS_PF(bp) && !BP_NOMCP(bp))
                bnx2x_update_mng_version(bp);
 
        /* Check if there are pending parity attentions. If there are - set
index 4d06548..7b08323 100644 (file)
@@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
 
        do {
                bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+
+               /* If we read all 0xFFs, means we are in PCI error state and
+                * should bail out to avoid crashes on adapter's FW reads.
+                */
+               if (bp->common.shmem_base == 0xFFFFFFFF) {
+                       bp->flags |= NO_MCP_FLAG;
+                       return -ENODEV;
+               }
+
                if (bp->common.shmem_base) {
                        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
                        if (val & SHR_MEM_VALIDITY_MB)
@@ -14322,7 +14331,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
                BNX2X_ERR("IO slot reset --> driver unload\n");
 
                /* MCP should have been reset; Need to wait for validity */
-               bnx2x_init_shmem(bp);
+               if (bnx2x_init_shmem(bp)) {
+                       rtnl_unlock();
+                       return PCI_ERS_RESULT_DISCONNECT;
+               }
 
                if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
                        u32 v;
index 9efbdc6..89c3c87 100644 (file)
@@ -2247,6 +2247,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
                if (rxr->xdp_prog)
                        bpf_prog_put(rxr->xdp_prog);
 
+               if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
+                       xdp_rxq_info_unreg(&rxr->xdp_rxq);
+
                kfree(rxr->rx_tpa);
                rxr->rx_tpa = NULL;
 
@@ -2280,6 +2283,10 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
 
                ring = &rxr->rx_ring_struct;
 
+               rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
+               if (rc < 0)
+                       return rc;
+
                rc = bnxt_alloc_ring(bp, ring);
                if (rc)
                        return rc;
@@ -2834,6 +2841,9 @@ void bnxt_set_ring_params(struct bnxt *bp)
        bp->cp_ring_mask = bp->cp_bit - 1;
 }
 
+/* Changing allocation mode of RX rings.
+ * TODO: Update when extending xdp_rxq_info to support allocation modes.
+ */
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 {
        if (page_mode) {
index 5359a1f..2d268fc 100644 (file)
@@ -23,6 +23,7 @@
 #include <net/devlink.h>
 #include <net/dst_metadata.h>
 #include <net/switchdev.h>
+#include <net/xdp.h>
 
 struct tx_bd {
        __le32 tx_bd_len_flags_type;
@@ -664,6 +665,7 @@ struct bnxt_rx_ring_info {
 
        struct bnxt_ring_struct rx_ring_struct;
        struct bnxt_ring_struct rx_agg_ring_struct;
+       struct xdp_rxq_info     xdp_rxq;
 };
 
 struct bnxt_cp_ring_info {
index fed37cd..3c746f2 100644 (file)
@@ -278,12 +278,11 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
 
        n = IEEE_8021QAZ_MAX_TCS;
        data_len = sizeof(*data) + sizeof(*fw_app) * n;
-       data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
-                                 GFP_KERNEL);
+       data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
+                                  GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       memset(data, 0, data_len);
        bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
        get.dest_data_addr = cpu_to_le64(mapping);
        get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
index 261e584..1389ab5 100644 (file)
@@ -96,6 +96,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
        xdp.data = *data_ptr;
        xdp_set_data_meta_invalid(&xdp);
        xdp.data_end = *data_ptr + *len;
+       xdp.rxq = &rxr->xdp_rxq;
        orig_data = xdp.data;
        mapping = rx_buf->mapping - bp->rx_dma_offset;
 
index de51c21..a77ee2f 100644 (file)
@@ -4,11 +4,13 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2014 Broadcom Corporation.
+ * Copyright (C) 2005-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
  *
  * Firmware is:
  *     Derived from proprietary unpublished source code,
- *     Copyright (C) 2000-2003 Broadcom Corporation.
+ *     Copyright (C) 2000-2016 Broadcom Corporation.
+ *     Copyright (C) 2016-2017 Broadcom Ltd.
  *
  *     Permission is hereby granted for the distribution of this firmware
  *     data in hexadecimal or equivalent format, provided this copyright
@@ -3225,7 +3227,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
        return 0;
 }
 
-#define NVRAM_CMD_TIMEOUT 5000
+#define NVRAM_CMD_TIMEOUT 10000
 
 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 {
@@ -10052,6 +10054,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
 
        tw32(GRC_MODE, tp->grc_mode | val);
 
+       /* On one of the AMD platform, MRRS is restricted to 4000 because of
+        * south bridge limitation. As a workaround, Driver is setting MRRS
+        * to 2048 instead of default 4096.
+        */
+       if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+           tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
+               val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
+               tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
+       }
+
        /* Setup the timer prescalar register.  Clock is always 66Mhz. */
        val = tr32(GRC_MISC_CFG);
        val &= ~0xff;
@@ -14225,7 +14237,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
        /* Reset PHY, otherwise the read DMA engine will be in a mode that
         * breaks all requests to 256 bytes.
         */
-       if (tg3_asic_rev(tp) == ASIC_REV_57766)
+       if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
+           tg3_asic_rev(tp) == ASIC_REV_5717 ||
+           tg3_asic_rev(tp) == ASIC_REV_5719 ||
+           tg3_asic_rev(tp) == ASIC_REV_5720)
                reset_phy = true;
 
        err = tg3_restart_hw(tp, reset_phy);
@@ -14774,7 +14789,7 @@ static void tg3_get_5717_nvram_info(struct tg3 *tp)
 
 static void tg3_get_5720_nvram_info(struct tg3 *tp)
 {
-       u32 nvcfg1, nvmpinstrp;
+       u32 nvcfg1, nvmpinstrp, nv_status;
 
        nvcfg1 = tr32(NVRAM_CFG1);
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
@@ -14786,6 +14801,23 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
                }
 
                switch (nvmpinstrp) {
+               case FLASH_5762_MX25L_100:
+               case FLASH_5762_MX25L_200:
+               case FLASH_5762_MX25L_400:
+               case FLASH_5762_MX25L_800:
+               case FLASH_5762_MX25L_160_320:
+                       tp->nvram_pagesize = 4096;
+                       tp->nvram_jedecnum = JEDEC_MACRONIX;
+                       tg3_flag_set(tp, NVRAM_BUFFERED);
+                       tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+                       tg3_flag_set(tp, FLASH);
+                       nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
+                       tp->nvram_size =
+                               (1 << (nv_status >> AUTOSENSE_DEVID &
+                                               AUTOSENSE_DEVID_MASK)
+                                       << AUTOSENSE_SIZE_IN_MB);
+                       return;
+
                case FLASH_5762_EEPROM_HD:
                        nvmpinstrp = FLASH_5720_EEPROM_HD;
                        break;
index c2d02d0..47f51cc 100644 (file)
@@ -5,7 +5,8 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2014 Broadcom Corporation.
+ * Copyright (C) 2007-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
  */
 
 #ifndef _T3_H
@@ -96,6 +97,7 @@
 #define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR                0x0106
 #define TG3PCI_SUBDEVICE_ID_DELL_MERLOT                0x0109
 #define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT   0x010a
+#define TG3PCI_SUBDEVICE_ID_DELL_5762          0x07f0
 #define TG3PCI_SUBVENDOR_ID_COMPAQ             PCI_VENDOR_ID_COMPAQ
 #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE     0x007c
 #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2   0x009a
 #define TG3PCI_STD_RING_PROD_IDX       0x00000098 /* 64-bit */
 #define TG3PCI_RCV_RET_RING_CON_IDX    0x000000a0 /* 64-bit */
 /* 0xa8 --> 0xb8 unused */
+#define TG3PCI_DEV_STATUS_CTRL         0x000000b4
+#define  MAX_READ_REQ_SIZE_2048                 0x00004000
+#define  MAX_READ_REQ_MASK              0x00007000
 #define TG3PCI_DUAL_MAC_CTRL           0x000000b8
 #define  DUAL_MAC_CTRL_CH_MASK          0x00000003
 #define  DUAL_MAC_CTRL_ID               0x00000004
 #define NVRAM_STAT                     0x00007004
 #define NVRAM_WRDATA                   0x00007008
 #define NVRAM_ADDR                     0x0000700c
-#define  NVRAM_ADDR_MSK                        0x00ffffff
+#define  NVRAM_ADDR_MSK                        0x07ffffff
 #define NVRAM_RDDATA                   0x00007010
 #define NVRAM_CFG1                     0x00007014
 #define  NVRAM_CFG1_FLASHIF_ENAB        0x00000001
 #define  FLASH_5720_EEPROM_LD           0x00000003
 #define  FLASH_5762_EEPROM_HD           0x02000001
 #define  FLASH_5762_EEPROM_LD           0x02000003
+#define  FLASH_5762_MX25L_100           0x00800000
+#define  FLASH_5762_MX25L_200           0x00800002
+#define  FLASH_5762_MX25L_400           0x00800001
+#define  FLASH_5762_MX25L_800           0x00800003
+#define  FLASH_5762_MX25L_160_320       0x03800002
 #define  FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
 #define  FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
 #define  FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
 /* 0x702c unused */
 
 #define NVRAM_ADDR_LOCKOUT             0x00007030
-/* 0x7034 --> 0x7500 unused */
+#define NVRAM_AUTOSENSE_STATUS         0x00007038
+#define AUTOSENSE_DEVID                        0x00000010
+#define AUTOSENSE_DEVID_MASK           0x00000007
+#define AUTOSENSE_SIZE_IN_MB           17
+/* 0x703c --> 0x7500 unused */
 
 #define OTP_MODE                       0x00007500
 #define OTP_MODE_OTP_THRU_GRC           0x00000001
@@ -3373,6 +3387,7 @@ struct tg3 {
 #define JEDEC_ST                       0x20
 #define JEDEC_SAIFUN                   0x4f
 #define JEDEC_SST                      0xbf
+#define JEDEC_MACRONIX                 0xc2
 
 #define ATMEL_AT24C02_CHIP_SIZE                TG3_NVRAM_SIZE_2KB
 #define ATMEL_AT24C02_PAGE_SIZE                (8)
index 2c615ab..f38abf6 100644 (file)
@@ -702,12 +702,10 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
        size = octdevsize + priv_size + configsize +
                (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
 
-       buf = vmalloc(size);
+       buf = vzalloc(size);
        if (!buf)
                return NULL;
 
-       memset(buf, 0, size);
-
        oct = (struct octeon_device *)buf;
        oct->priv = (void *)(buf + octdevsize);
        oct->chip = (void *)(buf + octdevsize + priv_size);
@@ -840,10 +838,9 @@ octeon_allocate_ioq_vector(struct octeon_device  *oct)
 
        size = sizeof(struct octeon_ioq_vector) * num_ioqs;
 
-       oct->ioq_vector = vmalloc(size);
+       oct->ioq_vector = vzalloc(size);
        if (!oct->ioq_vector)
                return 1;
-       memset(oct->ioq_vector, 0, size);
        for (i = 0; i < num_ioqs; i++) {
                ioq_vector              = &oct->ioq_vector[i];
                ioq_vector->oct_dev     = oct;
index 52b3a60..21618d0 100644 (file)
@@ -521,7 +521,7 @@ static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
 
 static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
                                struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
-                               struct sk_buff **skb)
+                               struct rcv_queue *rq, struct sk_buff **skb)
 {
        struct xdp_buff xdp;
        struct page *page;
@@ -545,6 +545,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
        xdp.data = (void *)cpu_addr;
        xdp_set_data_meta_invalid(&xdp);
        xdp.data_end = xdp.data + len;
+       xdp.rxq = &rq->xdp_rxq;
        orig_data = xdp.data;
 
        rcu_read_lock();
@@ -698,7 +699,8 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
 
 static void nicvf_rcv_pkt_handler(struct net_device *netdev,
                                  struct napi_struct *napi,
-                                 struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
+                                 struct cqe_rx_t *cqe_rx,
+                                 struct snd_queue *sq, struct rcv_queue *rq)
 {
        struct sk_buff *skb = NULL;
        struct nicvf *nic = netdev_priv(netdev);
@@ -724,7 +726,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
        /* For XDP, ignore pkts spanning multiple pages */
        if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
                /* Packet consumed by XDP */
-               if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
+               if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
                        return;
        } else {
                skb = nicvf_get_rcv_skb(snic, cqe_rx,
@@ -781,6 +783,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
        struct cqe_rx_t *cq_desc;
        struct netdev_queue *txq;
        struct snd_queue *sq = &qs->sq[cq_idx];
+       struct rcv_queue *rq = &qs->rq[cq_idx];
        unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
 
        spin_lock_bh(&cq->lock);
@@ -811,7 +814,7 @@ loop:
 
                switch (cq_desc->cqe_type) {
                case CQE_TYPE_RX:
-                       nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
+                       nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
                        work_done++;
                break;
                case CQE_TYPE_SEND:
index f38ea34..14e62c6 100644 (file)
@@ -760,6 +760,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 
        if (!rq->enable) {
                nicvf_reclaim_rcv_queue(nic, qs, qidx);
+               xdp_rxq_info_unreg(&rq->xdp_rxq);
                return;
        }
 
@@ -772,6 +773,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
        /* all writes of RBDR data to be loaded into L2 Cache as well*/
        rq->caching = 1;
 
+       /* Driver have no proper error path for failed XDP RX-queue info reg */
+       WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);
+
        /* Send a mailbox msg to PF to config RQ */
        mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
        mbx.rq.qs_num = qs->vnic_id;
index 178ab6e..7d1e4e2 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/netdevice.h>
 #include <linux/iommu.h>
 #include <linux/bpf.h>
+#include <net/xdp.h>
 #include "q_struct.h"
 
 #define MAX_QUEUE_SET                  128
@@ -255,6 +256,7 @@ struct rcv_queue {
        u8              start_qs_rbdr_idx; /* RBDR idx in the above QS */
        u8              caching;
        struct          rx_tx_queue_stats stats;
+       struct xdp_rxq_info xdp_rxq;
 } ____cacheline_aligned_in_smp;
 
 struct cmp_queue {
index 2e71e33..b57acb8 100644 (file)
@@ -405,37 +405,55 @@ static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
        {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
 };
 
-static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = {
-       {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
-       {0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */
-       {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
-       {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
-       {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
-       {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
-       {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
-       {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
-       {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
-       {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
-       {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
-       {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
-       {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
-
-};
-
-static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = {
-       {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
-       {0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */
-       {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
-       {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
-       {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
-       {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
-       {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
-       {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
-       {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
-       {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
-       {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
-       {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
-       {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
+static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+       {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+       {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
+       {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+       {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+       {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+       {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+       {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+       {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+       {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+       {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+       {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+       {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+       {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+       {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+       {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+       {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+       {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+       {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+       {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
+       {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
+       {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+       {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+};
+
+static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+       {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+       {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
+       {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+       {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+       {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+       {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+       {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+       {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+       {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+       {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+       {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+       {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+       {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+       {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+       {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+       {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+       {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+       {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+       {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
+       {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
+       {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
+       {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+       {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
 };
 
 static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
index e8173ae..88e7400 100644 (file)
@@ -21,6 +21,7 @@
 /* Error codes */
 #define CUDBG_STATUS_NO_MEM -19
 #define CUDBG_STATUS_ENTITY_NOT_FOUND -24
+#define CUDBG_STATUS_NOT_IMPLEMENTED -28
 #define CUDBG_SYSTEM_ERROR -29
 #define CUDBG_STATUS_CCLK_NOT_DEFINED -32
 
index 336670d..0a3871f 100644 (file)
@@ -2422,11 +2422,21 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
 {
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
+       u32 local_offset, local_range;
        struct ireg_buf *up_cim;
+       u32 size, j, iter;
+       u32 instance = 0;
        int i, rc, n;
-       u32 size;
 
-       n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+       if (is_t5(padap->params.chip))
+               n = sizeof(t5_up_cim_reg_array) /
+                   ((IREG_NUM_ELEM + 1) * sizeof(u32));
+       else if (is_t6(padap->params.chip))
+               n = sizeof(t6_up_cim_reg_array) /
+                   ((IREG_NUM_ELEM + 1) * sizeof(u32));
+       else
+               return CUDBG_STATUS_NOT_IMPLEMENTED;
+
        size = sizeof(struct ireg_buf) * n;
        rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
        if (rc)
@@ -2444,6 +2454,7 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
                                                t5_up_cim_reg_array[i][2];
                        up_cim_reg->ireg_offset_range =
                                                t5_up_cim_reg_array[i][3];
+                       instance = t5_up_cim_reg_array[i][4];
                } else if (is_t6(padap->params.chip)) {
                        up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
                        up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
@@ -2451,13 +2462,35 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
                                                t6_up_cim_reg_array[i][2];
                        up_cim_reg->ireg_offset_range =
                                                t6_up_cim_reg_array[i][3];
+                       instance = t6_up_cim_reg_array[i][4];
                }
 
-               rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
-                                up_cim_reg->ireg_offset_range, buff);
-               if (rc) {
-                       cudbg_put_buff(&temp_buff, dbg_buff);
-                       return rc;
+               switch (instance) {
+               case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
+                       iter = up_cim_reg->ireg_offset_range;
+                       local_offset = 0x120;
+                       local_range = 1;
+                       break;
+               case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
+                       iter = up_cim_reg->ireg_offset_range;
+                       local_offset = 0x10;
+                       local_range = 1;
+                       break;
+               default:
+                       iter = 1;
+                       local_offset = 0;
+                       local_range = up_cim_reg->ireg_offset_range;
+                       break;
+               }
+
+               for (j = 0; j < iter; j++, buff++) {
+                       rc = t4_cim_read(padap,
+                                        up_cim_reg->ireg_local_offset +
+                                        (j * local_offset), local_range, buff);
+                       if (rc) {
+                               cudbg_put_buff(&temp_buff, dbg_buff);
+                               return rc;
+                       }
                }
                up_cim++;
        }
index 581d628..a2d6c8a 100644 (file)
@@ -274,7 +274,13 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
                len = sizeof(struct cudbg_ulptx_la);
                break;
        case CUDBG_UP_CIM_INDIRECT:
-               n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+               n = 0;
+               if (is_t5(adap->params.chip))
+                       n = sizeof(t5_up_cim_reg_array) /
+                           ((IREG_NUM_ELEM + 1) * sizeof(u32));
+               else if (is_t6(adap->params.chip))
+                       n = sizeof(t6_up_cim_reg_array) /
+                           ((IREG_NUM_ELEM + 1) * sizeof(u32));
                len = sizeof(struct ireg_buf) * n;
                break;
        case CUDBG_PBT_TABLE:
index d3ced04..4ea76c1 100644 (file)
@@ -1743,7 +1743,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
                         */
                        if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
                                /* Inner header VNI */
-                               vniy = ((data2 & DATAVIDH2_F) << 23) |
+                               vniy = (data2 & DATAVIDH2_F) |
                                       (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
                                dip_hit = data2 & DATADIPHIT_F;
                        } else {
@@ -1753,6 +1753,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
                        port_num = DATAPORTNUM_G(data2);
 
                        /* Read tcamx. Change the control param */
+                       vnix = 0;
                        ctl |= CTLXYBITSEL_V(1);
                        t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
                        val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
@@ -1761,7 +1762,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
                        data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
                        if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
                                /* Inner header VNI mask */
-                               vnix = ((data2 & DATAVIDH2_F) << 23) |
+                               vnix = (data2 & DATAVIDH2_F) |
                                       (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
                        }
                } else {
@@ -1834,7 +1835,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
                                           addr[1], addr[2], addr[3],
                                           addr[4], addr[5],
                                           (unsigned long long)mask,
-                                          vniy, vnix, dip_hit ? 'Y' : 'N',
+                                          vniy, (vnix | vniy),
+                                          dip_hit ? 'Y' : 'N',
                                           port_num,
                                           (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
                                           PORTMAP_G(cls_hi),
index 541419b..7852d98 100644 (file)
@@ -517,7 +517,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
                else
                        return PORT_OTHER;
        } else if (port_type == FW_PORT_TYPE_KR4_100G ||
-                  port_type == FW_PORT_TYPE_KR_SFP28) {
+                  port_type == FW_PORT_TYPE_KR_SFP28 ||
+                  port_type == FW_PORT_TYPE_KR_XLAUI) {
                return PORT_NONE;
        }
 
@@ -645,6 +646,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
                FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
                break;
 
+       case FW_PORT_TYPE_KR_XLAUI:
+               SET_LMM(Backplane);
+               FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+               FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+               FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+               break;
+
        case FW_PORT_TYPE_CR2_QSFP:
                SET_LMM(FIBRE);
                SET_LMM(50000baseSR2_Full);
index 5980f30..677a3ba 100644 (file)
@@ -694,7 +694,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
        if (f->smt)
                cxgb4_smt_release(f->smt);
 
-       if (f->fs.hash && f->fs.type)
+       if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
                cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
 
        /* The zeroing of the filter rule below clears the filter valid,
@@ -1189,6 +1189,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
                       struct filter_ctx *ctx)
 {
        struct adapter *adapter = netdev2adap(dev);
+       unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
        unsigned int max_fidx, fidx;
        struct filter_entry *f;
        u32 iconf;
@@ -1225,12 +1226,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
         * insertion.
         */
        if (fs->type == 0) { /* IPv4 */
-               /* If our IPv4 filter isn't being written to a
-                * multiple of four filter index and there's an IPv6
-                * filter at the multiple of 4 base slot, then we
-                * prevent insertion.
+               /* For T6, If our IPv4 filter isn't being written to a
+                * multiple of two filter index and there's an IPv6
+                * filter at the multiple of 2 base slot, then we need
+                * to delete that IPv6 filter ...
+                * For adapters below T6, IPv6 filter occupies 4 entries.
+                * Hence we need to delete the filter in multiple of 4 slot.
                 */
-               fidx = filter_id & ~0x3;
+               if (chip_ver < CHELSIO_T6)
+                       fidx = filter_id & ~0x3;
+               else
+                       fidx = filter_id & ~0x1;
+
                if (fidx != filter_id &&
                    adapter->tids.ftid_tab[fidx].fs.type) {
                        f = &adapter->tids.ftid_tab[fidx];
@@ -1291,6 +1298,16 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
        if (f->valid)
                clear_filter(adapter, f);
 
+       if (is_t6(adapter->params.chip) && fs->type &&
+           ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
+           IPV6_ADDR_ANY) {
+               ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
+               if (ret) {
+                       cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6);
+                       return ret;
+               }
+       }
+
        /* Convert the filter specification into our internal format.
         * We copy the PF/VF specification into the Outer VLAN field
         * here so the rest of the code -- including the interface to
index 44930ca..f2a60e0 100644 (file)
@@ -6084,6 +6084,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
                "CR2_QSFP",
                "SFP28",
                "KR_SFP28",
+               "KR_XLAUI"
        };
 
        if (port_type < ARRAY_SIZE(port_type_description))
index 60cf9e0..51b1803 100644 (file)
@@ -183,6 +183,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
        CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
        CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
 
        /* T6 adapters:
         */
@@ -206,6 +207,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
        CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
        CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
index f6701e0..863bc29 100644 (file)
@@ -45,6 +45,9 @@
 #define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
 #define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
 
+#define NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES 4
+#define NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16
+
 #define MYPORT_BASE 0x1c000
 #define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
 
index 01f5a5e..427f252 100644 (file)
@@ -2829,6 +2829,7 @@ enum fw_port_type {
        FW_PORT_TYPE_CR2_QSFP,
        FW_PORT_TYPE_SFP28,
        FW_PORT_TYPE_KR_SFP28,
+       FW_PORT_TYPE_KR_XLAUI,
 
        FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
 };
index b48361c..96f69f8 100644 (file)
@@ -1229,7 +1229,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
                else
                        return PORT_OTHER;
        } else if (port_type == FW_PORT_TYPE_KR4_100G ||
-                  port_type == FW_PORT_TYPE_KR_SFP28) {
+                  port_type == FW_PORT_TYPE_KR_SFP28 ||
+                  port_type == FW_PORT_TYPE_KR_XLAUI) {
                return PORT_NONE;
        }
 
@@ -1323,6 +1324,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
                SET_LMM(25000baseKR_Full);
                break;
 
+       case FW_PORT_TYPE_KR_XLAUI:
+               SET_LMM(Backplane);
+               FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+               FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+               FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+               break;
+
        case FW_PORT_TYPE_CR2_QSFP:
                SET_LMM(FIBRE);
                SET_LMM(50000baseSR2_Full);
index 6a95270..9b218f0 100644 (file)
@@ -43,6 +43,8 @@
 #define ENIC_CQ_MAX            (ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX          (ENIC_CQ_MAX + 2)
 
+#define ENIC_WQ_NAPI_BUDGET    256
+
 #define ENIC_AIC_LARGE_PKT_DIFF        3
 
 struct enic_msix_entry {
index d98676e..f202ba7 100644 (file)
@@ -1500,7 +1500,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
        unsigned int cq_wq = enic_cq_wq(enic, 0);
        unsigned int intr = enic_legacy_io_intr();
        unsigned int rq_work_to_do = budget;
-       unsigned int wq_work_to_do = -1; /* no limit */
+       unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
        unsigned int  work_done, rq_work_done = 0, wq_work_done;
        int err;
 
@@ -1598,7 +1598,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
        struct vnic_wq *wq = &enic->wq[wq_index];
        unsigned int cq;
        unsigned int intr;
-       unsigned int wq_work_to_do = -1; /* clean all desc possible */
+       unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
        unsigned int wq_work_done;
        unsigned int wq_irq;
 
index 2d1b065..e17d10b 100644 (file)
@@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev)
                for (i = 0; i < txq->bd.ring_size; i++) {
                        /* Initialize the BD for every fragment in the page. */
                        bdp->cbd_sc = cpu_to_fec16(0);
+                       if (bdp->cbd_bufaddr &&
+                           !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+                               dma_unmap_single(&fep->pdev->dev,
+                                                fec32_to_cpu(bdp->cbd_bufaddr),
+                                                fec16_to_cpu(bdp->cbd_datlen),
+                                                DMA_TO_DEVICE);
                        if (txq->tx_skbuff[i]) {
                                dev_kfree_skb_any(txq->tx_skbuff[i]);
                                txq->tx_skbuff[i] = NULL;
index 8b5cdf4..cac86e9 100644 (file)
@@ -1168,7 +1168,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
 int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
                        enum hnae_led_state status)
 {
-       if (!mac_cb || !mac_cb->cpld_ctrl)
+       if (!mac_cb)
                return 0;
 
        return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
index 408b63f..ca247c2 100644 (file)
@@ -18,6 +18,7 @@ enum _dsm_op_index {
        HNS_OP_LED_SET_FUNC             = 0x3,
        HNS_OP_GET_PORT_TYPE_FUNC       = 0x4,
        HNS_OP_GET_SFP_STAT_FUNC        = 0x5,
+       HNS_OP_LOCATE_LED_SET_FUNC      = 0x6,
 };
 
 enum _dsm_rst_type {
@@ -81,6 +82,33 @@ static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
        ACPI_FREE(obj);
 }
 
+static void hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb,
+                                                u8 op_type, u32 locate,
+                                                u32 port)
+{
+       union acpi_object obj_args[2], argv4;
+       union acpi_object *obj;
+
+       obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+       obj_args[0].integer.value = locate;
+       obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+       obj_args[1].integer.value = port;
+
+       argv4.type = ACPI_TYPE_PACKAGE;
+       argv4.package.count = 2;
+       argv4.package.elements = obj_args;
+
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+                               &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+       if (!obj) {
+               dev_err(mac_cb->dev, "ledctrl fail, locate:%d port:%d!\n",
+                       locate, port);
+               return;
+       }
+
+       ACPI_FREE(obj);
+}
+
 static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
                             u16 speed, int data)
 {
@@ -160,6 +188,9 @@ static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
 static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
                           enum hnae_led_state status)
 {
+       if (!mac_cb->cpld_ctrl)
+               return 0;
+
        switch (status) {
        case HNAE_LED_ACTIVE:
                mac_cb->cpld_led_value =
@@ -184,6 +215,30 @@ static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
        return 0;
 }
 
+static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb,
+                               enum hnae_led_state status)
+{
+       switch (status) {
+       case HNAE_LED_ACTIVE:
+               hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+                                                    HNS_OP_LOCATE_LED_SET_FUNC,
+                                                    CPLD_LED_ON_VALUE,
+                                                    mac_cb->mac_id);
+               break;
+       case HNAE_LED_INACTIVE:
+               hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+                                                    HNS_OP_LOCATE_LED_SET_FUNC,
+                                                    CPLD_LED_DEFAULT_VALUE,
+                                                    mac_cb->mac_id);
+               break;
+       default:
+               dev_err(mac_cb->dev, "invalid led state: %d!", status);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 #define RESET_REQ_OR_DREQ 1
 
 static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
@@ -660,7 +715,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
        } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
                misc_op->cpld_set_led = hns_cpld_set_led_acpi;
                misc_op->cpld_reset_led = cpld_led_reset_acpi;
-               misc_op->cpld_set_led_id = cpld_set_led_id;
+               misc_op->cpld_set_led_id = cpld_set_led_id_acpi;
 
                misc_op->dsaf_reset = hns_dsaf_rst_acpi;
                misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
index a9e2b32..82e9a80 100644 (file)
@@ -278,6 +278,8 @@ struct hnae3_ae_dev {
  *   Set vlan filter config of Ports
  * set_vf_vlan_filter()
  *   Set vlan filter config of vf
+ * enable_hw_strip_rxvtag()
+ *   Enable/disable hardware strip vlan tag of packets received
  */
 struct hnae3_ae_ops {
        int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -384,8 +386,16 @@ struct hnae3_ae_ops {
                               u16 vlan_id, bool is_kill);
        int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
                                  u16 vlan, u8 qos, __be16 proto);
+       int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
        void (*reset_event)(struct hnae3_handle *handle,
                            enum hnae3_reset_type reset);
+       void (*get_channels)(struct hnae3_handle *handle,
+                            struct ethtool_channels *ch);
+       void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
+                                     u16 *free_tqps, u16 *max_rss_size);
+       int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
+       void (*get_flowctrl_adv)(struct hnae3_handle *handle,
+                                u32 *flowctrl_adv);
 };
 
 struct hnae3_dcb_ops {
index c2c1323..320ae88 100644 (file)
@@ -723,6 +723,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
        hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
 }
 
+static int hns3_fill_desc_vtags(struct sk_buff *skb,
+                               struct hns3_enet_ring *tx_ring,
+                               u32 *inner_vlan_flag,
+                               u32 *out_vlan_flag,
+                               u16 *inner_vtag,
+                               u16 *out_vtag)
+{
+#define HNS3_TX_VLAN_PRIO_SHIFT 13
+
+       if (skb->protocol == htons(ETH_P_8021Q) &&
+           !(tx_ring->tqp->handle->kinfo.netdev->features &
+           NETIF_F_HW_VLAN_CTAG_TX)) {
+               /* When HW VLAN acceleration is turned off, and the stack
+                * sets the protocol to 802.1q, the driver just need to
+                * set the protocol to the encapsulated ethertype.
+                */
+               skb->protocol = vlan_get_protocol(skb);
+               return 0;
+       }
+
+       if (skb_vlan_tag_present(skb)) {
+               u16 vlan_tag;
+
+               vlan_tag = skb_vlan_tag_get(skb);
+               vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
+
+               /* Based on hw strategy, use out_vtag in two layer tag case,
+                * and use inner_vtag in one tag case.
+                */
+               if (skb->protocol == htons(ETH_P_8021Q)) {
+                       hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+                       *out_vtag = vlan_tag;
+               } else {
+                       hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+                       *inner_vtag = vlan_tag;
+               }
+       } else if (skb->protocol == htons(ETH_P_8021Q)) {
+               struct vlan_ethhdr *vhdr;
+               int rc;
+
+               rc = skb_cow_head(skb, 0);
+               if (rc < 0)
+                       return rc;
+               vhdr = (struct vlan_ethhdr *)skb->data;
+               vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
+                                       << HNS3_TX_VLAN_PRIO_SHIFT);
+       }
+
+       skb->protocol = vlan_get_protocol(skb);
+       return 0;
+}
+
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
                          int size, dma_addr_t dma, int frag_end,
                          enum hns_desc_type type)
@@ -733,6 +785,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
        u16 bdtp_fe_sc_vld_ra_ri = 0;
        u32 type_cs_vlan_tso = 0;
        struct sk_buff *skb;
+       u16 inner_vtag = 0;
+       u16 out_vtag = 0;
        u32 paylen = 0;
        u16 mss = 0;
        __be16 protocol;
@@ -756,15 +810,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
                skb = (struct sk_buff *)priv;
                paylen = skb->len;
 
+               ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
+                                          &ol_type_vlan_len_msec,
+                                          &inner_vtag, &out_vtag);
+               if (unlikely(ret))
+                       return ret;
+
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        skb_reset_mac_len(skb);
                        protocol = skb->protocol;
 
-                       /* vlan packet*/
-                       if (protocol == htons(ETH_P_8021Q)) {
-                               protocol = vlan_get_protocol(skb);
-                               skb->protocol = protocol;
-                       }
                        ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
                        if (ret)
                                return ret;
@@ -790,6 +845,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
                        cpu_to_le32(type_cs_vlan_tso);
                desc->tx.paylen = cpu_to_le32(paylen);
                desc->tx.mss = cpu_to_le16(mss);
+               desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+               desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
        }
 
        /* move ring pointer to next.*/
@@ -1032,6 +1089,9 @@ static int hns3_nic_set_features(struct net_device *netdev,
                                 netdev_features_t features)
 {
        struct hns3_nic_priv *priv = netdev_priv(netdev);
+       struct hnae3_handle *h = priv->ae_handle;
+       netdev_features_t changed;
+       int ret;
 
        if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
                priv->ops.fill_desc = hns3_fill_desc_tso;
@@ -1041,6 +1101,17 @@ static int hns3_nic_set_features(struct net_device *netdev,
                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
        }
 
+       changed = netdev->features ^ features;
+       if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+               if (features & NETIF_F_HW_VLAN_CTAG_RX)
+                       ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
+               else
+                       ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
+
+               if (ret)
+                       return ret;
+       }
+
        netdev->features = features;
        return 0;
 }
@@ -1492,6 +1563,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 
        netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_HW_VLAN_CTAG_FILTER |
+               NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
@@ -1506,6 +1578,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 
        netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_HW_VLAN_CTAG_FILTER |
+               NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
@@ -2085,6 +2158,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
        prefetchw(skb->data);
 
+       /* Based on hw strategy, the tag offloaded will be stored at
+        * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+        * in one layer tag case.
+        */
+       if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+               u16 vlan_tag;
+
+               vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+               if (!(vlan_tag & VLAN_VID_MASK))
+                       vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+               if (vlan_tag & VLAN_VID_MASK)
+                       __vlan_hwaccel_put_tag(skb,
+                                              htons(ETH_P_8021Q),
+                                              vlan_tag);
+       }
+
        bnum = 1;
        if (length <= HNS3_RX_HEAD_SIZE) {
                memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2651,6 +2740,19 @@ err:
        return ret;
 }
 
+static void hns3_put_ring_config(struct hns3_nic_priv *priv)
+{
+       struct hnae3_handle *h = priv->ae_handle;
+       int i;
+
+       for (i = 0; i < h->kinfo.num_tqps; i++) {
+               devm_kfree(priv->dev, priv->ring_data[i].ring);
+               devm_kfree(priv->dev,
+                          priv->ring_data[i + h->kinfo.num_tqps].ring);
+       }
+       devm_kfree(priv->dev, priv->ring_data);
+}
+
 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 {
        int ret;
@@ -2787,8 +2889,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
                        h->ae_algo->ops->reset_queue(h, i);
 
                hns3_fini_ring(priv->ring_data[i].ring);
+               devm_kfree(priv->dev, priv->ring_data[i].ring);
                hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+               devm_kfree(priv->dev,
+                          priv->ring_data[i + h->kinfo.num_tqps].ring);
        }
+       devm_kfree(priv->dev, priv->ring_data);
 
        return 0;
 }
@@ -3162,6 +3268,115 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
        return ret;
 }
 
+static u16 hns3_get_max_available_channels(struct net_device *netdev)
+{
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+       u16 free_tqps, max_rss_size, max_tqps;
+
+       h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
+       max_tqps = h->kinfo.num_tc * max_rss_size;
+
+       return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
+}
+
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
+{
+       struct hns3_nic_priv *priv = netdev_priv(netdev);
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+       int ret;
+
+       ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
+       if (ret)
+               return ret;
+
+       ret = hns3_get_ring_config(priv);
+       if (ret)
+               return ret;
+
+       ret = hns3_nic_init_vector_data(priv);
+       if (ret)
+               goto err_uninit_vector;
+
+       ret = hns3_init_all_ring(priv);
+       if (ret)
+               goto err_put_ring;
+
+       return 0;
+
+err_put_ring:
+       hns3_put_ring_config(priv);
+err_uninit_vector:
+       hns3_nic_uninit_vector_data(priv);
+       return ret;
+}
+
+static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
+{
+       return (new_tqp_num / num_tc) * num_tc;
+}
+
+int hns3_set_channels(struct net_device *netdev,
+                     struct ethtool_channels *ch)
+{
+       struct hns3_nic_priv *priv = netdev_priv(netdev);
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+       struct hnae3_knic_private_info *kinfo = &h->kinfo;
+       bool if_running = netif_running(netdev);
+       u32 new_tqp_num = ch->combined_count;
+       u16 org_tqp_num;
+       int ret;
+
+       if (ch->rx_count || ch->tx_count)
+               return -EINVAL;
+
+       if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
+           new_tqp_num < kinfo->num_tc) {
+               dev_err(&netdev->dev,
+                       "Change tqps fail, the tqp range is from %d to %d",
+                       kinfo->num_tc,
+                       hns3_get_max_available_channels(netdev));
+               return -EINVAL;
+       }
+
+       new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
+       if (kinfo->num_tqps == new_tqp_num)
+               return 0;
+
+       if (if_running)
+               dev_close(netdev);
+
+       hns3_clear_all_ring(h);
+
+       ret = hns3_nic_uninit_vector_data(priv);
+       if (ret) {
+               dev_err(&netdev->dev,
+                       "Unbind vector with tqp fail, nothing is changed");
+               goto open_netdev;
+       }
+
+       hns3_uninit_all_ring(priv);
+
+       org_tqp_num = h->kinfo.num_tqps;
+       ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+       if (ret) {
+               ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+               if (ret) {
+                       /* If revert to old tqp failed, fatal error occurred */
+                       dev_err(&netdev->dev,
+                               "Revert to old tqp num fail, ret=%d", ret);
+                       return ret;
+               }
+               dev_info(&netdev->dev,
+                        "Change tqp num fail, Revert to old tqp num");
+       }
+
+open_netdev:
+       if (if_running)
+               dev_open(netdev);
+
+       return ret;
+}
+
 static const struct hnae3_client_ops client_ops = {
        .init_instance = hns3_client_init,
        .uninit_instance = hns3_client_uninit,
index 8a9de75..a2a7ea3 100644 (file)
@@ -595,6 +595,8 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
        (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
 
 void hns3_ethtool_set_ops(struct net_device *netdev);
+int hns3_set_channels(struct net_device *netdev,
+                     struct ethtool_channels *ch);
 
 bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
 int hns3_init_all_ring(struct hns3_nic_priv *priv);
index 65a69b4..379c01d 100644 (file)
@@ -559,10 +559,23 @@ static void hns3_get_pauseparam(struct net_device *netdev,
                        &param->rx_pause, &param->tx_pause);
 }
 
+static int hns3_set_pauseparam(struct net_device *netdev,
+                              struct ethtool_pauseparam *param)
+{
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+
+       if (h->ae_algo->ops->set_pauseparam)
+               return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
+                                                      param->rx_pause,
+                                                      param->tx_pause);
+       return -EOPNOTSUPP;
+}
+
 static int hns3_get_link_ksettings(struct net_device *netdev,
                                   struct ethtool_link_ksettings *cmd)
 {
        struct hnae3_handle *h = hns3_get_handle(netdev);
+       u32 flowctrl_adv = 0;
        u32 supported_caps;
        u32 advertised_caps;
        u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
@@ -638,6 +651,8 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
                if (!cmd->base.autoneg)
                        advertised_caps &= ~HNS3_LM_AUTONEG_BIT;
 
+               advertised_caps &= ~HNS3_LM_PAUSE_BIT;
+
                /* now, map driver link modes to ethtool link modes */
                hns3_driv_to_eth_caps(supported_caps, cmd, false);
                hns3_driv_to_eth_caps(advertised_caps, cmd, true);
@@ -650,6 +665,18 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
        /* 4.mdio_support */
        cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
 
+       /* 5.get flow control settings */
+       if (h->ae_algo->ops->get_flowctrl_adv)
+               h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
+
+       if (flowctrl_adv & ADVERTISED_Pause)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Pause);
+
+       if (flowctrl_adv & ADVERTISED_Asym_Pause)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Asym_Pause);
+
        return 0;
 }
 
@@ -730,7 +757,7 @@ static int hns3_get_rxnfc(struct net_device *netdev,
 
        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
-               cmd->data = h->kinfo.num_tc * h->kinfo.rss_size;
+               cmd->data = h->kinfo.rss_size;
                break;
        case ETHTOOL_GRXFH:
                return h->ae_algo->ops->get_rss_tuple(h, cmd);
@@ -849,6 +876,15 @@ static int hns3_nway_reset(struct net_device *netdev)
        return genphy_restart_aneg(phy);
 }
 
+static void hns3_get_channels(struct net_device *netdev,
+                             struct ethtool_channels *ch)
+{
+       struct hnae3_handle *h = hns3_get_handle(netdev);
+
+       if (h->ae_algo->ops->get_channels)
+               h->ae_algo->ops->get_channels(h, ch);
+}
+
 static const struct ethtool_ops hns3vf_ethtool_ops = {
        .get_drvinfo = hns3_get_drvinfo,
        .get_ringparam = hns3_get_ringparam,
@@ -871,6 +907,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
        .get_ringparam = hns3_get_ringparam,
        .set_ringparam = hns3_set_ringparam,
        .get_pauseparam = hns3_get_pauseparam,
+       .set_pauseparam = hns3_set_pauseparam,
        .get_strings = hns3_get_strings,
        .get_ethtool_stats = hns3_get_stats,
        .get_sset_count = hns3_get_sset_count,
@@ -883,6 +920,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
        .get_link_ksettings = hns3_get_link_ksettings,
        .set_link_ksettings = hns3_set_link_ksettings,
        .nway_reset = hns3_nway_reset,
+       .get_channels = hns3_get_channels,
+       .set_channels = hns3_set_channels,
 };
 
 void hns3_ethtool_set_ops(struct net_device *netdev)
index ce5ed88..f5baba2 100644 (file)
@@ -180,6 +180,10 @@ enum hclge_opcode_type {
        /* Promisuous mode command */
        HCLGE_OPC_CFG_PROMISC_MODE      = 0x0E01,
 
+       /* Vlan offload command */
+       HCLGE_OPC_VLAN_PORT_TX_CFG      = 0x0F01,
+       HCLGE_OPC_VLAN_PORT_RX_CFG      = 0x0F02,
+
        /* Interrupts cmd */
        HCLGE_OPC_ADD_RING_TO_VECTOR    = 0x1503,
        HCLGE_OPC_DEL_RING_TO_VECTOR    = 0x1504,
@@ -191,6 +195,7 @@ enum hclge_opcode_type {
        HCLGE_OPC_MAC_VLAN_INSERT           = 0x1003,
        HCLGE_OPC_MAC_ETHTYPE_ADD           = 0x1010,
        HCLGE_OPC_MAC_ETHTYPE_REMOVE    = 0x1011,
+       HCLGE_OPC_MAC_VLAN_MASK_SET     = 0x1012,
 
        /* Multicast linear table cmd */
        HCLGE_OPC_MTA_MAC_MODE_CFG          = 0x1020,
@@ -399,6 +404,8 @@ struct hclge_pf_res_cmd {
 #define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0)
 #define HCLGE_CFG_DEFAULT_SPEED_S      16
 #define HCLGE_CFG_DEFAULT_SPEED_M      GENMASK(23, 16)
+#define HCLGE_CFG_RSS_SIZE_S   24
+#define HCLGE_CFG_RSS_SIZE_M   GENMASK(31, 24)
 
 struct hclge_cfg_param_cmd {
        __le32 offset;
@@ -587,6 +594,15 @@ struct hclge_mac_vlan_tbl_entry_cmd {
        u8      rsv2[6];
 };
 
+#define HCLGE_VLAN_MASK_EN_B           0x0
+struct hclge_mac_vlan_mask_entry_cmd {
+       u8 rsv0[2];
+       u8 vlan_mask;
+       u8 rsv1;
+       u8 mac_mask[6];
+       u8 rsv2[14];
+};
+
 #define HCLGE_CFG_MTA_MAC_SEL_S                0x0
 #define HCLGE_CFG_MTA_MAC_SEL_M                GENMASK(1, 0)
 #define HCLGE_CFG_MTA_MAC_EN_B         0x7
@@ -658,6 +674,47 @@ struct hclge_vlan_filter_vf_cfg_cmd {
        u8  vf_bitmap[16];
 };
 
+#define HCLGE_ACCEPT_TAG_B             0
+#define HCLGE_ACCEPT_UNTAG_B           1
+#define HCLGE_PORT_INS_TAG1_EN_B       2
+#define HCLGE_PORT_INS_TAG2_EN_B       3
+#define HCLGE_CFG_NIC_ROCE_SEL_B       4
+struct hclge_vport_vtag_tx_cfg_cmd {
+       u8 vport_vlan_cfg;
+       u8 vf_offset;
+       u8 rsv1[2];
+       __le16 def_vlan_tag1;
+       __le16 def_vlan_tag2;
+       u8 vf_bitmap[8];
+       u8 rsv2[8];
+};
+
+#define HCLGE_REM_TAG1_EN_B            0
+#define HCLGE_REM_TAG2_EN_B            1
+#define HCLGE_SHOW_TAG1_EN_B           2
+#define HCLGE_SHOW_TAG2_EN_B           3
+struct hclge_vport_vtag_rx_cfg_cmd {
+       u8 vport_vlan_cfg;
+       u8 vf_offset;
+       u8 rsv1[6];
+       u8 vf_bitmap[8];
+       u8 rsv2[8];
+};
+
+struct hclge_tx_vlan_type_cfg_cmd {
+       __le16 ot_vlan_type;
+       __le16 in_vlan_type;
+       u8 rsv[20];
+};
+
+struct hclge_rx_vlan_type_cfg_cmd {
+       __le16 ot_fst_vlan_type;
+       __le16 ot_sec_vlan_type;
+       __le16 in_fst_vlan_type;
+       __le16 in_sec_vlan_type;
+       u8 rsv[16];
+};
+
 struct hclge_cfg_com_tqp_queue_cmd {
        __le16 tqp_id;
        __le16 stream_id;
index e97fd66..0874acf 100644 (file)
@@ -982,6 +982,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
        cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_DEFAULT_SPEED_M,
                                            HCLGE_CFG_DEFAULT_SPEED_S);
+       cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
+                                          HCLGE_CFG_RSS_SIZE_M,
+                                          HCLGE_CFG_RSS_SIZE_S);
+
        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
 
@@ -1059,7 +1063,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 
        hdev->num_vmdq_vport = cfg.vmdq_vport_num;
        hdev->base_tqp_pid = 0;
-       hdev->rss_size_max = 1;
+       hdev->rss_size_max = cfg.rss_size_max;
        hdev->rx_buf_len = cfg.rx_buf_len;
        ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
        hdev->hw.mac.media_type = cfg.media_type;
@@ -1096,10 +1100,7 @@ static int hclge_configure(struct hclge_dev *hdev)
        for (i = 0; i < hdev->tm_info.num_tc; i++)
                hnae_set_bit(hdev->hw_tc_map, i, 1);
 
-       if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
-               hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
-       else
-               hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
+       hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
 
        return ret;
 }
@@ -2133,28 +2134,6 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
        return 0;
 }
 
-static int hclge_query_autoneg_result(struct hclge_dev *hdev)
-{
-       struct hclge_mac *mac = &hdev->hw.mac;
-       struct hclge_query_an_speed_dup_cmd *req;
-       struct hclge_desc desc;
-       int ret;
-
-       req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
-       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-       if (ret) {
-               dev_err(&hdev->pdev->dev,
-                       "autoneg result query cmd failed %d.\n", ret);
-               return ret;
-       }
-
-       mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
-
-       return 0;
-}
-
 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
 {
        struct hclge_config_auto_neg_cmd *req;
@@ -2190,15 +2169,42 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
+       struct phy_device *phydev = hdev->hw.mac.phydev;
 
-       hclge_query_autoneg_result(hdev);
+       if (phydev)
+               return phydev->autoneg;
 
        return hdev->hw.mac.autoneg;
 }
 
+static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
+                                          bool mask_vlan,
+                                          u8 *mac_mask)
+{
+       struct hclge_mac_vlan_mask_entry_cmd *req;
+       struct hclge_desc desc;
+       int status;
+
+       req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
+
+       hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+                    mask_vlan ? 1 : 0);
+       ether_addr_copy(req->mac_mask, mac_mask);
+
+       status = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (status)
+               dev_err(&hdev->pdev->dev,
+                       "Config mac_vlan_mask failed for cmd_send, ret =%d\n",
+                       status);
+
+       return status;
+}
+
 static int hclge_mac_init(struct hclge_dev *hdev)
 {
        struct hclge_mac *mac = &hdev->hw.mac;
+       u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
        int ret;
 
        ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2224,7 +2230,19 @@ static int hclge_mac_init(struct hclge_dev *hdev)
                return ret;
        }
 
-       return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+       ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "set mta filter mode fail ret=%d\n", ret);
+               return ret;
+       }
+
+       ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
+       if (ret)
+               dev_err(&hdev->pdev->dev,
+                       "set default mac_vlan_mask fail ret=%d\n", ret);
+
+       return ret;
 }
 
 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
@@ -4339,27 +4357,185 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
        return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
 }
 
+static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+{
+       struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+       struct hclge_vport_vtag_tx_cfg_cmd *req;
+       struct hclge_dev *hdev = vport->back;
+       struct hclge_desc desc;
+       int status;
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+
+       req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+       req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
+       req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
+                    vcfg->accept_tag ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
+                    vcfg->accept_untag ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+                    vcfg->insert_tag1_en ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+                    vcfg->insert_tag2_en ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
+       req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+       req->vf_bitmap[req->vf_offset] =
+               1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+       status = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (status)
+               dev_err(&hdev->pdev->dev,
+                       "Send port txvlan cfg command fail, ret =%d\n",
+                       status);
+
+       return status;
+}
+
+static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+{
+       struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
+       struct hclge_vport_vtag_rx_cfg_cmd *req;
+       struct hclge_dev *hdev = vport->back;
+       struct hclge_desc desc;
+       int status;
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+
+       req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+                    vcfg->strip_tag1_en ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+                    vcfg->strip_tag2_en ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+                    vcfg->vlan1_vlan_prionly ? 1 : 0);
+       hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+                    vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+       req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+       req->vf_bitmap[req->vf_offset] =
+               1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+       status = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (status)
+               dev_err(&hdev->pdev->dev,
+                       "Send port rxvlan cfg command fail, ret =%d\n",
+                       status);
+
+       return status;
+}
+
+static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+{
+       struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+       struct hclge_tx_vlan_type_cfg_cmd *tx_req;
+       struct hclge_desc desc;
+       int status;
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
+       rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
+       rx_req->ot_fst_vlan_type =
+               cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
+       rx_req->ot_sec_vlan_type =
+               cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
+       rx_req->in_fst_vlan_type =
+               cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
+       rx_req->in_sec_vlan_type =
+               cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
+
+       status = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (status) {
+               dev_err(&hdev->pdev->dev,
+                       "Send rxvlan protocol type command fail, ret =%d\n",
+                       status);
+               return status;
+       }
+
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
+
+       tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
+       tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
+       tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
+
+       status = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (status)
+               dev_err(&hdev->pdev->dev,
+                       "Send txvlan protocol type command fail, ret =%d\n",
+                       status);
+
+       return status;
+}
+
 static int hclge_init_vlan_config(struct hclge_dev *hdev)
 {
-#define HCLGE_VLAN_TYPE_VF_TABLE   0
-#define HCLGE_VLAN_TYPE_PORT_TABLE 1
+#define HCLGE_FILTER_TYPE_VF           0
+#define HCLGE_FILTER_TYPE_PORT         1
+#define HCLGE_DEF_VLAN_TYPE            0x8100
+
        struct hnae3_handle *handle;
+       struct hclge_vport *vport;
        int ret;
+       int i;
 
-       ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
-                                        true);
+       ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
        if (ret)
                return ret;
 
-       ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
-                                        true);
+       ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
        if (ret)
                return ret;
 
+       hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+       hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+       hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+       hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+       hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
+       hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+
+       ret = hclge_set_vlan_protocol_type(hdev);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < hdev->num_alloc_vport; i++) {
+               vport = &hdev->vport[i];
+               vport->txvlan_cfg.accept_tag = true;
+               vport->txvlan_cfg.accept_untag = true;
+               vport->txvlan_cfg.insert_tag1_en = false;
+               vport->txvlan_cfg.insert_tag2_en = false;
+               vport->txvlan_cfg.default_tag1 = 0;
+               vport->txvlan_cfg.default_tag2 = 0;
+
+               ret = hclge_set_vlan_tx_offload_cfg(vport);
+               if (ret)
+                       return ret;
+
+               vport->rxvlan_cfg.strip_tag1_en = false;
+               vport->rxvlan_cfg.strip_tag2_en = true;
+               vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+               vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+               ret = hclge_set_vlan_rx_offload_cfg(vport);
+               if (ret)
+                       return ret;
+       }
+
        handle = &hdev->vport[0].nic;
        return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
 }
 
+static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+
+       vport->rxvlan_cfg.strip_tag1_en = false;
+       vport->rxvlan_cfg.strip_tag2_en = enable;
+       vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+       vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+       return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4481,6 +4657,103 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
        return hdev->fw_version;
 }
 
+static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
+                                  u32 *flowctrl_adv)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       struct phy_device *phydev = hdev->hw.mac.phydev;
+
+       if (!phydev)
+               return;
+
+       *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
+                        (phydev->advertising & ADVERTISED_Asym_Pause);
+}
+
+static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+       struct phy_device *phydev = hdev->hw.mac.phydev;
+
+       if (!phydev)
+               return;
+
+       phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+       if (rx_en)
+               phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+       if (tx_en)
+               phydev->advertising ^= ADVERTISED_Asym_Pause;
+}
+
+static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+       enum hclge_fc_mode fc_mode;
+       int ret;
+
+       if (rx_en && tx_en)
+               fc_mode = HCLGE_FC_FULL;
+       else if (rx_en && !tx_en)
+               fc_mode = HCLGE_FC_RX_PAUSE;
+       else if (!rx_en && tx_en)
+               fc_mode = HCLGE_FC_TX_PAUSE;
+       else
+               fc_mode = HCLGE_FC_NONE;
+
+       if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+               hdev->fc_mode_last_time = fc_mode;
+               return 0;
+       }
+
+       ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+       if (ret) {
+               dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
+                       ret);
+               return ret;
+       }
+
+       hdev->tm_info.fc_mode = fc_mode;
+
+       return 0;
+}
+
+int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+{
+       struct phy_device *phydev = hdev->hw.mac.phydev;
+       u16 remote_advertising = 0;
+       u16 local_advertising = 0;
+       u32 rx_pause, tx_pause;
+       u8 flowctl;
+
+       if (!phydev->link || !phydev->autoneg)
+               return 0;
+
+       if (phydev->advertising & ADVERTISED_Pause)
+               local_advertising = ADVERTISE_PAUSE_CAP;
+
+       if (phydev->advertising & ADVERTISED_Asym_Pause)
+               local_advertising |= ADVERTISE_PAUSE_ASYM;
+
+       if (phydev->pause)
+               remote_advertising = LPA_PAUSE_CAP;
+
+       if (phydev->asym_pause)
+               remote_advertising |= LPA_PAUSE_ASYM;
+
+       flowctl = mii_resolve_flowctrl_fdx(local_advertising,
+                                          remote_advertising);
+       tx_pause = flowctl & FLOW_CTRL_TX;
+       rx_pause = flowctl & FLOW_CTRL_RX;
+
+       if (phydev->duplex == HCLGE_MAC_HALF) {
+               tx_pause = 0;
+               rx_pause = 0;
+       }
+
+       return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
+}
+
 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
                                 u32 *rx_en, u32 *tx_en)
 {
@@ -4510,6 +4783,41 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
        }
 }
 
+static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+                               u32 rx_en, u32 tx_en)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       struct phy_device *phydev = hdev->hw.mac.phydev;
+       u32 fc_autoneg;
+
+       /* Only support flow control negotiation for netdev with
+        * phy attached for now.
+        */
+       if (!phydev)
+               return -EOPNOTSUPP;
+
+       fc_autoneg = hclge_get_autoneg(handle);
+       if (auto_neg != fc_autoneg) {
+               dev_info(&hdev->pdev->dev,
+                        "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+               dev_info(&hdev->pdev->dev,
+                        "Priority flow control enabled. Cannot set link flow control.\n");
+               return -EOPNOTSUPP;
+       }
+
+       hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+
+       if (!fc_autoneg)
+               return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+
+       return phy_start_aneg(phydev);
+}
+
 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
                                          u8 *auto_neg, u32 *speed, u8 *duplex)
 {
@@ -5002,6 +5310,136 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
        ae_dev->priv = NULL;
 }
 
+static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+{
+       struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+
+       return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+static void hclge_get_channels(struct hnae3_handle *handle,
+                              struct ethtool_channels *ch)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+
+       ch->max_combined = hclge_get_max_channels(handle);
+       ch->other_count = 1;
+       ch->max_other = 1;
+       ch->combined_count = vport->alloc_tqps;
+}
+
+static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
+                                       u16 *free_tqps, u16 *max_rss_size)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hclge_dev *hdev = vport->back;
+       u16 temp_tqps = 0;
+       int i;
+
+       for (i = 0; i < hdev->num_tqps; i++) {
+               if (!hdev->htqp[i].alloced)
+                       temp_tqps++;
+       }
+       *free_tqps = temp_tqps;
+       *max_rss_size = hdev->rss_size_max;
+}
+
+static void hclge_release_tqp(struct hclge_vport *vport)
+{
+       struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+       struct hclge_dev *hdev = vport->back;
+       int i;
+
+       for (i = 0; i < kinfo->num_tqps; i++) {
+               struct hclge_tqp *tqp =
+                       container_of(kinfo->tqp[i], struct hclge_tqp, q);
+
+               tqp->q.handle = NULL;
+               tqp->q.tqp_index = 0;
+               tqp->alloced = false;
+       }
+
+       devm_kfree(&hdev->pdev->dev, kinfo->tqp);
+       kinfo->tqp = NULL;
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
+{
+       struct hclge_vport *vport = hclge_get_vport(handle);
+       struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+       struct hclge_dev *hdev = vport->back;
+       int cur_rss_size = kinfo->rss_size;
+       int cur_tqps = kinfo->num_tqps;
+       u16 tc_offset[HCLGE_MAX_TC_NUM];
+       u16 tc_valid[HCLGE_MAX_TC_NUM];
+       u16 tc_size[HCLGE_MAX_TC_NUM];
+       u16 roundup_size;
+       u32 *rss_indir;
+       int ret, i;
+
+       hclge_release_tqp(vport);
+
+       ret = hclge_knic_setup(vport, new_tqps_num);
+       if (ret) {
+               dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
+               return ret;
+       }
+
+       ret = hclge_map_tqp_to_vport(hdev, vport);
+       if (ret) {
+               dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
+               return ret;
+       }
+
+       ret = hclge_tm_schd_init(hdev);
+       if (ret) {
+               dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
+               return ret;
+       }
+
+       roundup_size = roundup_pow_of_two(kinfo->rss_size);
+       roundup_size = ilog2(roundup_size);
+       /* Set the RSS TC mode according to the new RSS size */
+       for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+               tc_valid[i] = 0;
+
+               if (!(hdev->hw_tc_map & BIT(i)))
+                       continue;
+
+               tc_valid[i] = 1;
+               tc_size[i] = roundup_size;
+               tc_offset[i] = kinfo->rss_size * i;
+       }
+       ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+       if (ret)
+               return ret;
+
+       /* Reinitializes the rss indirect table according to the new RSS size */
+       rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+       if (!rss_indir)
+               return -ENOMEM;
+
+       for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+               rss_indir[i] = i % kinfo->rss_size;
+
+       ret = hclge_set_rss(handle, rss_indir, NULL, 0);
+       if (ret)
+               dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+                       ret);
+
+       kfree(rss_indir);
+
+       if (!ret)
+               dev_info(&hdev->pdev->dev,
+                        "Channels changed, rss_size from %d to %d, tqps from %d to %d",
+                        cur_rss_size, kinfo->rss_size,
+                        cur_tqps, kinfo->rss_size * kinfo->num_tc);
+
+       return ret;
+}
+
 static const struct hnae3_ae_ops hclge_ops = {
        .init_ae_dev = hclge_init_ae_dev,
        .uninit_ae_dev = hclge_uninit_ae_dev,
@@ -5035,6 +5473,7 @@ static const struct hnae3_ae_ops hclge_ops = {
        .set_autoneg = hclge_set_autoneg,
        .get_autoneg = hclge_get_autoneg,
        .get_pauseparam = hclge_get_pauseparam,
+       .set_pauseparam = hclge_set_pauseparam,
        .set_mtu = hclge_set_mtu,
        .reset_queue = hclge_reset_tqp,
        .get_stats = hclge_get_stats,
@@ -5045,7 +5484,12 @@ static const struct hnae3_ae_ops hclge_ops = {
        .get_mdix_mode = hclge_get_mdix_mode,
        .set_vlan_filter = hclge_set_port_vlan_filter,
        .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+       .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
        .reset_event = hclge_reset_event,
+       .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
+       .set_channels = hclge_set_channels,
+       .get_channels = hclge_get_channels,
+       .get_flowctrl_adv = hclge_get_flowctrl_adv,
 };
 
 static struct hnae3_ae_algo ae_algo = {
index fb043b5..28cc063 100644 (file)
 #define HCLGE_PHY_MDIX_STATUS_B        (6)
 #define HCLGE_PHY_SPEED_DUP_RESOLVE_B  (11)
 
+/* Factor used to calculate offset and bitmap of VF num */
+#define HCLGE_VF_NUM_PER_CMD           64
+#define HCLGE_VF_NUM_PER_BYTE          8
+
 /* Reset related Registers */
 #define HCLGE_MISC_RESET_STS_REG       0x20700
 #define HCLGE_GLOBAL_RESET_REG         0x20A00
@@ -220,6 +224,7 @@ struct hclge_cfg {
        u8 tc_num;
        u16 tqp_desc_num;
        u16 rx_buf_len;
+       u16 rss_size_max;
        u8 phy_addr;
        u8 media_type;
        u8 mac_addr[ETH_ALEN];
@@ -423,6 +428,15 @@ struct hclge_hw_stats {
        struct hclge_32_bit_stats   all_32_bit_stats;
 };
 
+struct hclge_vlan_type_cfg {
+       u16 rx_ot_fst_vlan_type;
+       u16 rx_ot_sec_vlan_type;
+       u16 rx_in_fst_vlan_type;
+       u16 rx_in_sec_vlan_type;
+       u16 tx_ot_vlan_type;
+       u16 tx_in_vlan_type;
+};
+
 struct hclge_dev {
        struct pci_dev *pdev;
        struct hnae3_ae_dev *ae_dev;
@@ -509,6 +523,26 @@ struct hclge_dev {
        enum hclge_mta_dmac_sel_type mta_mac_sel_type;
        bool enable_mta; /* Mutilcast filter enable */
        bool accept_mta_mc; /* Whether accept mta filter multicast */
+
+       struct hclge_vlan_type_cfg vlan_type_cfg;
+};
+
+/* VPort level vlan tag configuration for TX direction */
+struct hclge_tx_vtag_cfg {
+       bool accept_tag;        /* Whether to accept tagged packets from host */
+       bool accept_untag;      /* Whether to accept untagged packets from host */
+       bool insert_tag1_en;    /* Whether insert inner vlan tag */
+       bool insert_tag2_en;    /* Whether insert outer vlan tag */
+       u16  default_tag1;      /* The default inner vlan tag to insert */
+       u16  default_tag2;      /* The default outer vlan tag to insert */
+};
+
+/* VPort level vlan tag configuration for RX direction */
+struct hclge_rx_vtag_cfg {
+       bool strip_tag1_en;     /* Whether strip inner vlan tag */
+       bool strip_tag2_en;     /* Whether strip outer vlan tag */
+       bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
+       bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
 };
 
 struct hclge_vport {
@@ -523,6 +557,9 @@ struct hclge_vport {
        u16 bw_limit;           /* VSI BW Limit (0 = disabled) */
        u8  dwrr;
 
+       struct hclge_tx_vtag_cfg  txvlan_cfg;
+       struct hclge_rx_vtag_cfg  rxvlan_cfg;
+
        int vport_id;
        struct hclge_dev *back;  /* Back reference to associated dev */
        struct hnae3_handle nic;
@@ -565,4 +602,5 @@ int hclge_rss_init_hw(struct hclge_dev *hdev);
 
 void hclge_mbx_handler(struct hclge_dev *hdev);
 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+int hclge_cfg_flowctrl(struct hclge_dev *hdev);
 #endif
index 7069e94..c1dea3a 100644 (file)
@@ -17,6 +17,7 @@
 #define HCLGE_PHY_SUPPORTED_FEATURES   (SUPPORTED_Autoneg | \
                                         SUPPORTED_TP | \
                                         SUPPORTED_Pause | \
+                                        SUPPORTED_Asym_Pause | \
                                         PHY_10BT_FEATURES | \
                                         PHY_100BT_FEATURES | \
                                         PHY_1000BT_FEATURES)
@@ -183,6 +184,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
        ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
        if (ret)
                netdev_err(netdev, "failed to adjust link.\n");
+
+       ret = hclge_cfg_flowctrl(hdev);
+       if (ret)
+               netdev_err(netdev, "failed to configure flow control.\n");
 }
 
 int hclge_mac_start_phy(struct hclge_dev *hdev)
index 7bfa2e5..ea9355d 100644 (file)
@@ -23,8 +23,8 @@ enum hclge_shaper_level {
        HCLGE_SHAPER_LVL_PF     = 1,
 };
 
-#define HCLGE_SHAPER_BS_U_DEF  1
-#define HCLGE_SHAPER_BS_S_DEF  4
+#define HCLGE_SHAPER_BS_U_DEF  5
+#define HCLGE_SHAPER_BS_S_DEF  20
 
 #define HCLGE_ETHER_MAX_RATE   100000
 
@@ -112,7 +112,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
        return 0;
 }
 
-static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
 {
        struct hclge_desc desc;
 
index bf59961..16f4139 100644 (file)
@@ -118,4 +118,5 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
 int hclge_tm_map_cfg(struct hclge_dev *hdev);
 int hclge_tm_init_hw(struct hclge_dev *hdev);
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
 #endif
index 7feff24..71ddad1 100644 (file)
@@ -199,18 +199,18 @@ static void __emac_set_multicast_list(struct emac_instance *dev);
 
 static inline int emac_phy_supports_gige(int phy_mode)
 {
-       return  phy_mode == PHY_MODE_GMII ||
-               phy_mode == PHY_MODE_RGMII ||
-               phy_mode == PHY_MODE_SGMII ||
-               phy_mode == PHY_MODE_TBI ||
-               phy_mode == PHY_MODE_RTBI;
+       return  phy_interface_mode_is_rgmii(phy_mode) ||
+               phy_mode == PHY_INTERFACE_MODE_GMII ||
+               phy_mode == PHY_INTERFACE_MODE_SGMII ||
+               phy_mode == PHY_INTERFACE_MODE_TBI ||
+               phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline int emac_phy_gpcs(int phy_mode)
 {
-       return  phy_mode == PHY_MODE_SGMII ||
-               phy_mode == PHY_MODE_TBI ||
-               phy_mode == PHY_MODE_RTBI;
+       return  phy_mode == PHY_INTERFACE_MODE_SGMII ||
+               phy_mode == PHY_INTERFACE_MODE_TBI ||
+               phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline void emac_tx_enable(struct emac_instance *dev)
@@ -2865,7 +2865,7 @@ static int emac_init_config(struct emac_instance *dev)
        /* PHY mode needs some decoding */
        dev->phy_mode = of_get_phy_mode(np);
        if (dev->phy_mode < 0)
-               dev->phy_mode = PHY_MODE_NA;
+               dev->phy_mode = PHY_INTERFACE_MODE_NA;
 
        /* Check EMAC version */
        if (of_device_is_compatible(np, "ibm,emac4sync")) {
@@ -3168,7 +3168,7 @@ static int emac_probe(struct platform_device *ofdev)
        printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
               ndev->name, dev->cell_index, np, ndev->dev_addr);
 
-       if (dev->phy_mode == PHY_MODE_SGMII)
+       if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
                printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
 
        if (dev->phy.address >= 0)
index 5afcc27..bc14dcf 100644 (file)
@@ -104,19 +104,6 @@ struct emac_regs {
        } u1;
 };
 
-/*
- * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
- */
-#define PHY_MODE_NA    PHY_INTERFACE_MODE_NA
-#define PHY_MODE_MII   PHY_INTERFACE_MODE_MII
-#define PHY_MODE_RMII  PHY_INTERFACE_MODE_RMII
-#define PHY_MODE_SMII  PHY_INTERFACE_MODE_SMII
-#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
-#define PHY_MODE_TBI   PHY_INTERFACE_MODE_TBI
-#define PHY_MODE_GMII  PHY_INTERFACE_MODE_GMII
-#define PHY_MODE_RTBI  PHY_INTERFACE_MODE_RTBI
-#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
-
 /* EMACx_MR0 */
 #define EMAC_MR0_RXI                   0x80000000
 #define EMAC_MR0_TXI                   0x40000000
index 35865d0..aa070c0 100644 (file)
@@ -96,7 +96,7 @@ int emac_mii_reset_gpcs(struct mii_phy *phy)
        if ((val & BMCR_ISOLATE) && limit > 0)
                gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
 
-       if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
+       if (limit > 0 && phy->mode == PHY_INTERFACE_MODE_SGMII) {
                /* Configure GPCS interface to recommended setting for SGMII */
                gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
                gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
@@ -313,16 +313,16 @@ static int cis8201_init(struct mii_phy *phy)
        epcr &= ~EPCR_MODE_MASK;
 
        switch (phy->mode) {
-       case PHY_MODE_TBI:
+       case PHY_INTERFACE_MODE_TBI:
                epcr |= EPCR_TBI_MODE;
                break;
-       case PHY_MODE_RTBI:
+       case PHY_INTERFACE_MODE_RTBI:
                epcr |= EPCR_RTBI_MODE;
                break;
-       case PHY_MODE_GMII:
+       case PHY_INTERFACE_MODE_GMII:
                epcr |= EPCR_GMII_MODE;
                break;
-       case PHY_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII:
        default:
                epcr |= EPCR_RGMII_MODE;
        }
index c4a1ac3..00f5999 100644 (file)
 /* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
 static inline int rgmii_valid_mode(int phy_mode)
 {
-       return  phy_mode == PHY_MODE_GMII ||
-               phy_mode == PHY_MODE_MII ||
-               phy_mode == PHY_MODE_RGMII ||
-               phy_mode == PHY_MODE_TBI ||
-               phy_mode == PHY_MODE_RTBI;
-}
-
-static inline const char *rgmii_mode_name(int mode)
-{
-       switch (mode) {
-       case PHY_MODE_RGMII:
-               return "RGMII";
-       case PHY_MODE_TBI:
-               return "TBI";
-       case PHY_MODE_GMII:
-               return "GMII";
-       case PHY_MODE_MII:
-               return "MII";
-       case PHY_MODE_RTBI:
-               return "RTBI";
-       default:
-               BUG();
-       }
+       return  phy_interface_mode_is_rgmii(phy_mode) ||
+               phy_mode == PHY_INTERFACE_MODE_GMII ||
+               phy_mode == PHY_INTERFACE_MODE_MII ||
+               phy_mode == PHY_INTERFACE_MODE_TBI ||
+               phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline u32 rgmii_mode_mask(int mode, int input)
 {
        switch (mode) {
-       case PHY_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
                return RGMII_FER_RGMII(input);
-       case PHY_MODE_TBI:
+       case PHY_INTERFACE_MODE_TBI:
                return RGMII_FER_TBI(input);
-       case PHY_MODE_GMII:
+       case PHY_INTERFACE_MODE_GMII:
                return RGMII_FER_GMII(input);
-       case PHY_MODE_MII:
+       case PHY_INTERFACE_MODE_MII:
                return RGMII_FER_MII(input);
-       case PHY_MODE_RTBI:
+       case PHY_INTERFACE_MODE_RTBI:
                return RGMII_FER_RTBI(input);
        default:
                BUG();
@@ -115,7 +100,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode)
        out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
 
        printk(KERN_NOTICE "%pOF: input %d in %s mode\n",
-              ofdev->dev.of_node, input, rgmii_mode_name(mode));
+              ofdev->dev.of_node, input, phy_modes(mode));
 
        ++dev->users;
 
index 89c42d3..fdcc734 100644 (file)
  */
 static inline int zmii_valid_mode(int mode)
 {
-       return  mode == PHY_MODE_MII ||
-               mode == PHY_MODE_RMII ||
-               mode == PHY_MODE_SMII ||
-               mode == PHY_MODE_NA;
+       return  mode == PHY_INTERFACE_MODE_MII ||
+               mode == PHY_INTERFACE_MODE_RMII ||
+               mode == PHY_INTERFACE_MODE_SMII ||
+               mode == PHY_INTERFACE_MODE_NA;
 }
 
 static inline const char *zmii_mode_name(int mode)
 {
        switch (mode) {
-       case PHY_MODE_MII:
+       case PHY_INTERFACE_MODE_MII:
                return "MII";
-       case PHY_MODE_RMII:
+       case PHY_INTERFACE_MODE_RMII:
                return "RMII";
-       case PHY_MODE_SMII:
+       case PHY_INTERFACE_MODE_SMII:
                return "SMII";
        default:
                BUG();
@@ -72,11 +72,11 @@ static inline const char *zmii_mode_name(int mode)
 static inline u32 zmii_mode_mask(int mode, int input)
 {
        switch (mode) {
-       case PHY_MODE_MII:
+       case PHY_INTERFACE_MODE_MII:
                return ZMII_FER_MII(input);
-       case PHY_MODE_RMII:
+       case PHY_INTERFACE_MODE_RMII:
                return ZMII_FER_RMII(input);
-       case PHY_MODE_SMII:
+       case PHY_INTERFACE_MODE_SMII:
                return ZMII_FER_SMII(input);
        default:
                return 0;
@@ -106,27 +106,27 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode)
         * Please, always specify PHY mode in your board port to avoid
         * any surprises.
         */
-       if (dev->mode == PHY_MODE_NA) {
-               if (*mode == PHY_MODE_NA) {
+       if (dev->mode == PHY_INTERFACE_MODE_NA) {
+               if (*mode == PHY_INTERFACE_MODE_NA) {
                        u32 r = dev->fer_save;
 
                        ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r);
 
                        if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
-                               dev->mode = PHY_MODE_MII;
+                               dev->mode = PHY_INTERFACE_MODE_MII;
                        else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
-                               dev->mode = PHY_MODE_RMII;
+                               dev->mode = PHY_INTERFACE_MODE_RMII;
                        else
-                               dev->mode = PHY_MODE_SMII;
-               } else
+                               dev->mode = PHY_INTERFACE_MODE_SMII;
+               } else {
                        dev->mode = *mode;
-
+               }
                printk(KERN_NOTICE "%pOF: bridge in %s mode\n",
                       ofdev->dev.of_node,
                       zmii_mode_name(dev->mode));
        } else {
                /* All inputs must use the same mode */
-               if (*mode != PHY_MODE_NA && *mode != dev->mode) {
+               if (*mode != PHY_INTERFACE_MODE_NA && *mode != dev->mode) {
                        printk(KERN_ERR
                               "%pOF: invalid mode %d specified for input %d\n",
                               ofdev->dev.of_node, *mode, input);
@@ -246,7 +246,7 @@ static int zmii_probe(struct platform_device *ofdev)
 
        mutex_init(&dev->lock);
        dev->ofdev = ofdev;
-       dev->mode = PHY_MODE_NA;
+       dev->mode = PHY_INTERFACE_MODE_NA;
 
        rc = -ENXIO;
        if (of_address_to_resource(np, 0, &regs)) {
index 5f6cf72..cfd788b 100644 (file)
@@ -1585,6 +1585,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
                         */
                        rx_rings[i].desc = NULL;
                        rx_rings[i].rx_bi = NULL;
+                       /* Clear cloned XDP RX-queue info before setup call */
+                       memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq));
                        /* this is to allow wr32 to have something to write to
                         * during early allocation of Rx buffers
                         */
index 4566d66..2a8a85e 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/prefetch.h>
 #include <net/busy_poll.h>
 #include <linux/bpf_trace.h>
+#include <net/xdp.h>
 #include "i40e.h"
 #include "i40e_trace.h"
 #include "i40e_prototype.h"
@@ -1236,6 +1237,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 {
        i40e_clean_rx_ring(rx_ring);
+       if (rx_ring->vsi->type == I40E_VSI_MAIN)
+               xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
        rx_ring->xdp_prog = NULL;
        kfree(rx_ring->rx_bi);
        rx_ring->rx_bi = NULL;
@@ -1256,6 +1259,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 {
        struct device *dev = rx_ring->dev;
+       int err = -ENOMEM;
        int bi_size;
 
        /* warn if we are about to overwrite the pointer */
@@ -1283,13 +1287,21 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
+       /* XDP RX-queue info only needed for RX rings exposed to XDP */
+       if (rx_ring->vsi->type == I40E_VSI_MAIN) {
+               err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+                                      rx_ring->queue_index);
+               if (err < 0)
+                       goto err;
+       }
+
        rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
 
        return 0;
 err:
        kfree(rx_ring->rx_bi);
        rx_ring->rx_bi = NULL;
-       return -ENOMEM;
+       return err;
 }
 
 /**
@@ -2068,11 +2080,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        struct sk_buff *skb = rx_ring->skb;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
        bool failure = false, xdp_xmit = false;
+       struct xdp_buff xdp;
+
+       xdp.rxq = &rx_ring->xdp_rxq;
 
        while (likely(total_rx_packets < (unsigned int)budget)) {
                struct i40e_rx_buffer *rx_buffer;
                union i40e_rx_desc *rx_desc;
-               struct xdp_buff xdp;
                unsigned int size;
                u16 vlan_tag;
                u8 rx_ptype;
index fbae118..2d08760 100644 (file)
@@ -27,6 +27,8 @@
 #ifndef _I40E_TXRX_H_
 #define _I40E_TXRX_H_
 
+#include <net/xdp.h>
+
 /* Interrupt Throttling and Rate Limiting Goodies */
 
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
@@ -428,6 +430,7 @@ struct i40e_ring {
                                         */
 
        struct i40e_channel *ch;
+       struct xdp_rxq_info xdp_rxq;
 } ____cacheline_internodealigned_in_smp;
 
 static inline bool ring_uses_build_skb(struct i40e_ring *ring)
index 468c355..8611763 100644 (file)
@@ -53,6 +53,7 @@
 #include <linux/dca.h>
 #endif
 
+#include <net/xdp.h>
 #include <net/busy_poll.h>
 
 /* common prefix used by pr_<> macros */
@@ -371,6 +372,7 @@ struct ixgbe_ring {
                struct ixgbe_tx_queue_stats tx_stats;
                struct ixgbe_rx_queue_stats rx_stats;
        };
+       struct xdp_rxq_info xdp_rxq;
 } ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
index 0aad1c2..0aaf70b 100644 (file)
@@ -1156,6 +1156,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                        memcpy(&temp_ring[i], adapter->rx_ring[i],
                               sizeof(struct ixgbe_ring));
 
+                       /* Clear copied XDP RX-queue info */
+                       memset(&temp_ring[i].xdp_rxq, 0,
+                              sizeof(temp_ring[i].xdp_rxq));
+
                        temp_ring[i].count = new_rx_count;
                        err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
                        if (err) {
index 7737a05..95aba97 100644 (file)
@@ -2318,12 +2318,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 #endif /* IXGBE_FCOE */
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
        bool xdp_xmit = false;
+       struct xdp_buff xdp;
+
+       xdp.rxq = &rx_ring->xdp_rxq;
 
        while (likely(total_rx_packets < budget)) {
                union ixgbe_adv_rx_desc *rx_desc;
                struct ixgbe_rx_buffer *rx_buffer;
                struct sk_buff *skb;
-               struct xdp_buff xdp;
                unsigned int size;
 
                /* return some buffers to hardware, one at a time is too slow */
@@ -6444,6 +6446,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
+       /* XDP RX-queue info */
+       if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
+                            rx_ring->queue_index) < 0)
+               goto err;
+
        rx_ring->xdp_prog = adapter->xdp_prog;
 
        return 0;
@@ -6541,6 +6548,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
        ixgbe_clean_rx_ring(rx_ring);
 
        rx_ring->xdp_prog = NULL;
+       xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;
 
index da6fb82..ebe5c91 100644 (file)
@@ -60,7 +60,7 @@ config MVNETA
        depends on ARCH_MVEBU || COMPILE_TEST
        depends on HAS_DMA
        select MVMDIO
-       select FIXED_PHY
+       select PHYLINK
        ---help---
          This driver supports the network interface units in the
          Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
index bc93b69..25e9a55 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
 #include <net/hwbm.h>
 #define MVNETA_GMAC_CTRL_0                       0x2c00
 #define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
+#define      MVNETA_GMAC0_PORT_1000BASE_X        BIT(1)
 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 #define MVNETA_GMAC_CTRL_2                       0x2c08
 #define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
 #define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
 #define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
 #define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
+#define      MVNETA_GMAC_AN_COMPLETE             BIT(11)
+#define      MVNETA_GMAC_SYNC_OK                 BIT(14)
 #define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 #define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 #define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
+#define      MVNETA_GMAC_AN_BYPASS_ENABLE        BIT(3)
+#define      MVNETA_GMAC_INBAND_RESTART_AN       BIT(4)
 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
 #define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
+#define      MVNETA_GMAC_CONFIG_FLOW_CTRL        BIT(8)
+#define      MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL    BIT(9)
 #define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 #define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 #define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
 #define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
 
+#define MVNETA_LPI_CTRL_0                        0x2cc0
+#define MVNETA_LPI_CTRL_1                        0x2cc4
+#define      MVNETA_LPI_REQUEST_ENABLE           BIT(0)
+#define MVNETA_LPI_CTRL_2                        0x2cc8
+#define MVNETA_LPI_STATUS                        0x2ccc
+
 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK     0xff
 
 /* Descriptor ring Macros */
 #define MVNETA_RX_GET_BM_POOL_ID(rxd) \
        (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
 
+enum {
+       ETHTOOL_STAT_EEE_WAKEUP,
+       ETHTOOL_MAX_STATS,
+};
+
 struct mvneta_statistic {
        unsigned short offset;
        unsigned short type;
@@ -321,6 +339,7 @@ struct mvneta_statistic {
 
 #define T_REG_32       32
 #define T_REG_64       64
+#define T_SW           1
 
 static const struct mvneta_statistic mvneta_statistics[] = {
        { 0x3000, T_REG_64, "good_octets_received", },
@@ -355,6 +374,7 @@ static const struct mvneta_statistic mvneta_statistics[] = {
        { 0x304c, T_REG_32, "broadcast_frames_sent", },
        { 0x3054, T_REG_32, "fc_sent", },
        { 0x300c, T_REG_32, "internal_mac_transmit_err", },
+       { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
 };
 
 struct mvneta_pcpu_stats {
@@ -407,20 +427,20 @@ struct mvneta_port {
        u16 tx_ring_size;
        u16 rx_ring_size;
 
-       struct mii_bus *mii_bus;
        phy_interface_t phy_interface;
-       struct device_node *phy_node;
-       unsigned int link;
-       unsigned int duplex;
-       unsigned int speed;
+       struct device_node *dn;
        unsigned int tx_csum_limit;
-       unsigned int use_inband_status:1;
+       struct phylink *phylink;
 
        struct mvneta_bm *bm_priv;
        struct mvneta_bm_pool *pool_long;
        struct mvneta_bm_pool *pool_short;
        int bm_win_id;
 
+       bool eee_enabled;
+       bool eee_active;
+       bool tx_lpi_enabled;
+
        u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
 
        u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -1273,44 +1293,6 @@ static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
                mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
 }
 
-static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
-{
-       u32 val;
-
-       if (enable) {
-               val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-               val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
-                        MVNETA_GMAC_FORCE_LINK_DOWN |
-                        MVNETA_GMAC_AN_FLOW_CTRL_EN);
-               val |= MVNETA_GMAC_INBAND_AN_ENABLE |
-                      MVNETA_GMAC_AN_SPEED_EN |
-                      MVNETA_GMAC_AN_DUPLEX_EN;
-               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
-               val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
-               val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
-               mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
-               val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-               val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
-               mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-       } else {
-               val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-               val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
-                      MVNETA_GMAC_AN_SPEED_EN |
-                      MVNETA_GMAC_AN_DUPLEX_EN);
-               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
-               val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
-               val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
-               mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
-               val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-               val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
-               mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-       }
-}
-
 static void mvneta_percpu_unmask_interrupt(void *arg)
 {
        struct mvneta_port *pp = arg;
@@ -1463,7 +1445,6 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
        val &= ~MVNETA_PHY_POLLING_ENABLE;
        mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
 
-       mvneta_set_autoneg(pp, pp->use_inband_status);
        mvneta_set_ucast_table(pp, -1);
        mvneta_set_special_mcast_table(pp, -1);
        mvneta_set_other_mcast_table(pp, -1);
@@ -1958,9 +1939,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 
                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+                       mvneta_rx_error(pp, rx_desc);
 err_drop_frame:
                        dev->stats.rx_errors++;
-                       mvneta_rx_error(pp, rx_desc);
                        /* leave the descriptor untouched */
                        continue;
                }
@@ -2688,26 +2669,11 @@ static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int mvneta_fixed_link_update(struct mvneta_port *pp,
-                                   struct phy_device *phy)
+static void mvneta_link_change(struct mvneta_port *pp)
 {
-       struct fixed_phy_status status;
-       struct fixed_phy_status changed = {};
        u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
 
-       status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
-       if (gmac_stat & MVNETA_GMAC_SPEED_1000)
-               status.speed = SPEED_1000;
-       else if (gmac_stat & MVNETA_GMAC_SPEED_100)
-               status.speed = SPEED_100;
-       else
-               status.speed = SPEED_10;
-       status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
-       changed.link = 1;
-       changed.speed = 1;
-       changed.duplex = 1;
-       fixed_phy_update_state(phy, &status, &changed);
-       return 0;
+       phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
 }
 
 /* NAPI handler
@@ -2723,7 +2689,6 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
        u32 cause_rx_tx;
        int rx_queue;
        struct mvneta_port *pp = netdev_priv(napi->dev);
-       struct net_device *ndev = pp->dev;
        struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 
        if (!netif_running(pp->dev)) {
@@ -2737,12 +2702,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
                u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
 
                mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
-               if (pp->use_inband_status && (cause_misc &
-                               (MVNETA_CAUSE_PHY_STATUS_CHANGE |
-                                MVNETA_CAUSE_LINK_CHANGE |
-                                MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
-                       mvneta_fixed_link_update(pp, ndev->phydev);
-               }
+
+               if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
+                                 MVNETA_CAUSE_LINK_CHANGE))
+                       mvneta_link_change(pp);
        }
 
        /* Release Tx descriptors */
@@ -3011,7 +2974,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
        int queue;
 
-       for (queue = 0; queue < txq_number; queue++)
+       for (queue = 0; queue < rxq_number; queue++)
                mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }
 
@@ -3056,7 +3019,6 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
        int cpu;
-       struct net_device *ndev = pp->dev;
 
        mvneta_max_rx_size_set(pp, pp->pkt_size);
        mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3081,19 +3043,17 @@ static void mvneta_start_dev(struct mvneta_port *pp)
 
        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
-                   MVNETA_CAUSE_LINK_CHANGE |
-                   MVNETA_CAUSE_PSC_SYNC_CHANGE);
+                   MVNETA_CAUSE_LINK_CHANGE);
 
-       phy_start(ndev->phydev);
+       phylink_start(pp->phylink);
        netif_tx_start_all_queues(pp->dev);
 }
 
 static void mvneta_stop_dev(struct mvneta_port *pp)
 {
        unsigned int cpu;
-       struct net_device *ndev = pp->dev;
 
-       phy_stop(ndev->phydev);
+       phylink_stop(pp->phylink);
 
        if (!pp->neta_armada3700) {
                for_each_online_cpu(cpu) {
@@ -3247,103 +3207,260 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
        return 0;
 }
 
-static void mvneta_adjust_link(struct net_device *ndev)
+static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
+                           struct phylink_link_state *state)
+{
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+       /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
+       if (state->interface != PHY_INTERFACE_MODE_NA &&
+           state->interface != PHY_INTERFACE_MODE_QSGMII &&
+           state->interface != PHY_INTERFACE_MODE_SGMII &&
+           !phy_interface_mode_is_8023z(state->interface) &&
+           !phy_interface_mode_is_rgmii(state->interface)) {
+               bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+               return;
+       }
+
+       /* Allow all the expected bits */
+       phylink_set(mask, Autoneg);
+       phylink_set_port_modes(mask);
+
+       /* Asymmetric pause is unsupported */
+       phylink_set(mask, Pause);
+       /* Half-duplex at speeds higher than 100Mbit is unsupported */
+       phylink_set(mask, 1000baseT_Full);
+       phylink_set(mask, 1000baseX_Full);
+
+       if (!phy_interface_mode_is_8023z(state->interface)) {
+               /* 10M and 100M are only supported in non-802.3z mode */
+               phylink_set(mask, 10baseT_Half);
+               phylink_set(mask, 10baseT_Full);
+               phylink_set(mask, 100baseT_Half);
+               phylink_set(mask, 100baseT_Full);
+       }
+
+       bitmap_and(supported, supported, mask,
+                  __ETHTOOL_LINK_MODE_MASK_NBITS);
+       bitmap_and(state->advertising, state->advertising, mask,
+                  __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int mvneta_mac_link_state(struct net_device *ndev,
+                                struct phylink_link_state *state)
 {
        struct mvneta_port *pp = netdev_priv(ndev);
-       struct phy_device *phydev = ndev->phydev;
-       int status_change = 0;
+       u32 gmac_stat;
 
-       if (phydev->link) {
-               if ((pp->speed != phydev->speed) ||
-                   (pp->duplex != phydev->duplex)) {
-                       u32 val;
+       gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
 
-                       val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-                       val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
-                                MVNETA_GMAC_CONFIG_GMII_SPEED |
-                                MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+       if (gmac_stat & MVNETA_GMAC_SPEED_1000)
+               state->speed = SPEED_1000;
+       else if (gmac_stat & MVNETA_GMAC_SPEED_100)
+               state->speed = SPEED_100;
+       else
+               state->speed = SPEED_10;
 
-                       if (phydev->duplex)
-                               val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+       state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
+       state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
+       state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
 
-                       if (phydev->speed == SPEED_1000)
-                               val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-                       else if (phydev->speed == SPEED_100)
-                               val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+       state->pause = 0;
+       if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
+               state->pause |= MLO_PAUSE_RX;
+       if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
+               state->pause |= MLO_PAUSE_TX;
 
-                       mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+       return 1;
+}
 
-                       pp->duplex = phydev->duplex;
-                       pp->speed  = phydev->speed;
-               }
+static void mvneta_mac_an_restart(struct net_device *ndev)
+{
+       struct mvneta_port *pp = netdev_priv(ndev);
+       u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+
+       mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+                   gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
+       mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+                   gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
+}
+
+static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
+       const struct phylink_link_state *state)
+{
+       struct mvneta_port *pp = netdev_priv(ndev);
+       u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+       u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+       u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+       u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+
+       new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
+       new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
+                                  MVNETA_GMAC2_PORT_RESET);
+       new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
+       new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+                            MVNETA_GMAC_INBAND_RESTART_AN |
+                            MVNETA_GMAC_CONFIG_MII_SPEED |
+                            MVNETA_GMAC_CONFIG_GMII_SPEED |
+                            MVNETA_GMAC_AN_SPEED_EN |
+                            MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
+                            MVNETA_GMAC_CONFIG_FLOW_CTRL |
+                            MVNETA_GMAC_AN_FLOW_CTRL_EN |
+                            MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+                            MVNETA_GMAC_AN_DUPLEX_EN);
+
+       /* Even though it might look weird, when we're configured in
+        * SGMII or QSGMII mode, the RGMII bit needs to be set.
+        */
+       new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
+
+       if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
+           state->interface == PHY_INTERFACE_MODE_SGMII ||
+           phy_interface_mode_is_8023z(state->interface))
+               new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
+
+       if (phylink_test(state->advertising, Pause))
+               new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+       if (state->pause & MLO_PAUSE_TXRX_MASK)
+               new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+       if (!phylink_autoneg_inband(mode)) {
+               /* Phy or fixed speed */
+               if (state->duplex)
+                       new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+               if (state->speed == SPEED_1000)
+                       new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+               else if (state->speed == SPEED_100)
+                       new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
+       } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+               /* SGMII mode receives the state from the PHY */
+               new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
+               new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+               new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+                                    MVNETA_GMAC_FORCE_LINK_PASS)) |
+                        MVNETA_GMAC_INBAND_AN_ENABLE |
+                        MVNETA_GMAC_AN_SPEED_EN |
+                        MVNETA_GMAC_AN_DUPLEX_EN;
+       } else {
+               /* 802.3z negotiation - only 1000base-X */
+               new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
+               new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+               new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+                                    MVNETA_GMAC_FORCE_LINK_PASS)) |
+                        MVNETA_GMAC_INBAND_AN_ENABLE |
+                        MVNETA_GMAC_CONFIG_GMII_SPEED |
+                        /* The MAC only supports FD mode */
+                        MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+               if (state->pause & MLO_PAUSE_AN && state->an_enabled)
+                       new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
        }
 
-       if (phydev->link != pp->link) {
-               if (!phydev->link) {
-                       pp->duplex = -1;
-                       pp->speed = 0;
-               }
+       /* Armada 370 documentation says we can only change the port mode
+        * and in-band enable when the link is down, so force it down
+        * while making these changes. We also do this for GMAC_CTRL2 */
+       if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
+           (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
+           (new_an  ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
+               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+                           (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
+                           MVNETA_GMAC_FORCE_LINK_DOWN);
+       }
 
-               pp->link = phydev->link;
-               status_change = 1;
+       if (new_ctrl0 != gmac_ctrl0)
+               mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
+       if (new_ctrl2 != gmac_ctrl2)
+               mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
+       if (new_clk != gmac_clk)
+               mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
+       if (new_an != gmac_an)
+               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
+
+       if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
+               while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
+                       MVNETA_GMAC2_PORT_RESET) != 0)
+                       continue;
        }
+}
 
-       if (status_change) {
-               if (phydev->link) {
-                       if (!pp->use_inband_status) {
-                               u32 val = mvreg_read(pp,
-                                                 MVNETA_GMAC_AUTONEG_CONFIG);
-                               val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
-                               val |= MVNETA_GMAC_FORCE_LINK_PASS;
-                               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
-                                           val);
-                       }
-                       mvneta_port_up(pp);
-               } else {
-                       if (!pp->use_inband_status) {
-                               u32 val = mvreg_read(pp,
-                                                 MVNETA_GMAC_AUTONEG_CONFIG);
-                               val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
-                               val |= MVNETA_GMAC_FORCE_LINK_DOWN;
-                               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
-                                           val);
-                       }
-                       mvneta_port_down(pp);
-               }
-               phy_print_status(phydev);
+static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
+{
+       u32 lpi_ctl1;
+
+       lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+       if (enable)
+               lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
+       else
+               lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
+       mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
+}
+
+static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode)
+{
+       struct mvneta_port *pp = netdev_priv(ndev);
+       u32 val;
+
+       mvneta_port_down(pp);
+
+       if (!phylink_autoneg_inband(mode)) {
+               val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+               val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+               val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
        }
+
+       pp->eee_active = false;
+       mvneta_set_eee(pp, false);
 }
 
-static int mvneta_mdio_probe(struct mvneta_port *pp)
+static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
+                              struct phy_device *phy)
 {
-       struct phy_device *phy_dev;
-       struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+       struct mvneta_port *pp = netdev_priv(ndev);
+       u32 val;
 
-       phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
-                                pp->phy_interface);
-       if (!phy_dev) {
-               netdev_err(pp->dev, "could not find the PHY\n");
-               return -ENODEV;
+       if (!phylink_autoneg_inband(mode)) {
+               val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+               val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+               val |= MVNETA_GMAC_FORCE_LINK_PASS;
+               mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
        }
 
-       phy_ethtool_get_wol(phy_dev, &wol);
-       device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+       mvneta_port_up(pp);
 
-       phy_dev->supported &= PHY_GBIT_FEATURES;
-       phy_dev->advertising = phy_dev->supported;
+       if (phy && pp->eee_enabled) {
+               pp->eee_active = phy_init_eee(phy, 0) >= 0;
+               mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+       }
+}
 
-       pp->link    = 0;
-       pp->duplex  = 0;
-       pp->speed   = 0;
+static const struct phylink_mac_ops mvneta_phylink_ops = {
+       .validate = mvneta_validate,
+       .mac_link_state = mvneta_mac_link_state,
+       .mac_an_restart = mvneta_mac_an_restart,
+       .mac_config = mvneta_mac_config,
+       .mac_link_down = mvneta_mac_link_down,
+       .mac_link_up = mvneta_mac_link_up,
+};
 
-       return 0;
+static int mvneta_mdio_probe(struct mvneta_port *pp)
+{
+       struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+       int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
+
+       if (err)
+               netdev_err(pp->dev, "could not attach PHY: %d\n", err);
+
+       phylink_ethtool_get_wol(pp->phylink, &wol);
+       device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
+
+       return err;
 }
 
 static void mvneta_mdio_remove(struct mvneta_port *pp)
 {
-       struct net_device *ndev = pp->dev;
-
-       phy_disconnect(ndev->phydev);
+       phylink_disconnect_phy(pp->phylink);
 }
 
 /* Electing a CPU must be done in an atomic way: it should be done
@@ -3451,8 +3568,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
        on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
-                   MVNETA_CAUSE_LINK_CHANGE |
-                   MVNETA_CAUSE_PSC_SYNC_CHANGE);
+                   MVNETA_CAUSE_LINK_CHANGE);
        netif_tx_start_all_queues(pp->dev);
        spin_unlock(&pp->lock);
        return 0;
@@ -3493,8 +3609,7 @@ static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
        on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
-                   MVNETA_CAUSE_LINK_CHANGE |
-                   MVNETA_CAUSE_PSC_SYNC_CHANGE);
+                   MVNETA_CAUSE_LINK_CHANGE);
        netif_tx_start_all_queues(pp->dev);
        return 0;
 }
@@ -3622,10 +3737,9 @@ static int mvneta_stop(struct net_device *dev)
 
 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-       if (!dev->phydev)
-               return -ENOTSUPP;
+       struct mvneta_port *pp = netdev_priv(dev);
 
-       return phy_mii_ioctl(dev->phydev, ifr, cmd);
+       return phylink_mii_ioctl(pp->phylink, ifr, cmd);
 }
 
 /* Ethtool methods */
@@ -3636,44 +3750,25 @@ mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
                                  const struct ethtool_link_ksettings *cmd)
 {
        struct mvneta_port *pp = netdev_priv(ndev);
-       struct phy_device *phydev = ndev->phydev;
-
-       if (!phydev)
-               return -ENODEV;
-
-       if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
-               u32 val;
-
-               mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
-
-               if (cmd->base.autoneg == AUTONEG_DISABLE) {
-                       val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-                       val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
-                                MVNETA_GMAC_CONFIG_GMII_SPEED |
-                                MVNETA_GMAC_CONFIG_FULL_DUPLEX);
 
-                       if (phydev->duplex)
-                               val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
-
-                       if (phydev->speed == SPEED_1000)
-                               val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-                       else if (phydev->speed == SPEED_100)
-                               val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+       return phylink_ethtool_ksettings_set(pp->phylink, cmd);
+}
 
-                       mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-               }
+/* Get link ksettings for ethtools */
+static int
+mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
+                                 struct ethtool_link_ksettings *cmd)
+{
+       struct mvneta_port *pp = netdev_priv(ndev);
 
-               pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
-               netdev_info(pp->dev, "autoneg status set to %i\n",
-                           pp->use_inband_status);
+       return phylink_ethtool_ksettings_get(pp->phylink, cmd);
+}
 
-               if (netif_running(ndev)) {
-                       mvneta_port_down(pp);
-                       mvneta_port_up(pp);
-               }
-       }
+static int mvneta_ethtool_nway_reset(struct net_device *dev)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
 
-       return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+       return phylink_ethtool_nway_reset(pp->phylink);
 }
 
 /* Set interrupt coalescing for ethtools */
@@ -3765,6 +3860,22 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
        return 0;
 }
 
+static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
+                                         struct ethtool_pauseparam *pause)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+
+       phylink_ethtool_get_pauseparam(pp->phylink, pause);
+}
+
+static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
+                                        struct ethtool_pauseparam *pause)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+
+       return phylink_ethtool_set_pauseparam(pp->phylink, pause);
+}
+
 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
                                       u8 *data)
 {
@@ -3781,26 +3892,35 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
 {
        const struct mvneta_statistic *s;
        void __iomem *base = pp->base;
-       u32 high, low, val;
-       u64 val64;
+       u32 high, low;
+       u64 val;
        int i;
 
        for (i = 0, s = mvneta_statistics;
             s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
             s++, i++) {
+               val = 0;
+
                switch (s->type) {
                case T_REG_32:
                        val = readl_relaxed(base + s->offset);
-                       pp->ethtool_stats[i] += val;
                        break;
                case T_REG_64:
                        /* Docs say to read low 32-bit then high */
                        low = readl_relaxed(base + s->offset);
                        high = readl_relaxed(base + s->offset + 4);
-                       val64 = (u64)high << 32 | low;
-                       pp->ethtool_stats[i] += val64;
+                       val = (u64)high << 32 | low;
+                       break;
+               case T_SW:
+                       switch (s->offset) {
+                       case ETHTOOL_STAT_EEE_WAKEUP:
+                               val = phylink_get_eee_err(pp->phylink);
+                               break;
+                       }
                        break;
                }
+
+               pp->ethtool_stats[i] += val;
        }
 }
 
@@ -3935,28 +4055,81 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
 static void mvneta_ethtool_get_wol(struct net_device *dev,
                                   struct ethtool_wolinfo *wol)
 {
-       wol->supported = 0;
-       wol->wolopts = 0;
+       struct mvneta_port *pp = netdev_priv(dev);
 
-       if (dev->phydev)
-               phy_ethtool_get_wol(dev->phydev, wol);
+       phylink_ethtool_get_wol(pp->phylink, wol);
 }
 
 static int mvneta_ethtool_set_wol(struct net_device *dev,
                                  struct ethtool_wolinfo *wol)
 {
+       struct mvneta_port *pp = netdev_priv(dev);
        int ret;
 
-       if (!dev->phydev)
-               return -EOPNOTSUPP;
-
-       ret = phy_ethtool_set_wol(dev->phydev, wol);
+       ret = phylink_ethtool_set_wol(pp->phylink, wol);
        if (!ret)
                device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
 
        return ret;
 }
 
+static int mvneta_ethtool_get_module_info(struct net_device *dev,
+                                         struct ethtool_modinfo *modinfo)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+
+       return phylink_ethtool_get_module_info(pp->phylink, modinfo);
+}
+
+static int mvneta_ethtool_get_module_eeprom(struct net_device *dev,
+                                           struct ethtool_eeprom *ee, u8 *buf)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+
+       return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf);
+}
+
+static int mvneta_ethtool_get_eee(struct net_device *dev,
+                                 struct ethtool_eee *eee)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+       u32 lpi_ctl0;
+
+       lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+
+       eee->eee_enabled = pp->eee_enabled;
+       eee->eee_active = pp->eee_active;
+       eee->tx_lpi_enabled = pp->tx_lpi_enabled;
+       eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
+
+       return phylink_ethtool_get_eee(pp->phylink, eee);
+}
+
+static int mvneta_ethtool_set_eee(struct net_device *dev,
+                                 struct ethtool_eee *eee)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+       u32 lpi_ctl0;
+
+       /* The Armada 37x documents do not give limits for this other than
+        * it being an 8-bit register. */
+       if (eee->tx_lpi_enabled &&
+           (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
+               return -EINVAL;
+
+       lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+       lpi_ctl0 &= ~(0xff << 8);
+       lpi_ctl0 |= eee->tx_lpi_timer << 8;
+       mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
+
+       pp->eee_enabled = eee->eee_enabled;
+       pp->tx_lpi_enabled = eee->tx_lpi_enabled;
+
+       mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
+
+       return phylink_ethtool_set_eee(pp->phylink, eee);
+}
+
 static const struct net_device_ops mvneta_netdev_ops = {
        .ndo_open            = mvneta_open,
        .ndo_stop            = mvneta_stop,
@@ -3970,13 +4143,15 @@ static const struct net_device_ops mvneta_netdev_ops = {
 };
 
 static const struct ethtool_ops mvneta_eth_tool_ops = {
-       .nway_reset     = phy_ethtool_nway_reset,
+       .nway_reset     = mvneta_ethtool_nway_reset,
        .get_link       = ethtool_op_get_link,
        .set_coalesce   = mvneta_ethtool_set_coalesce,
        .get_coalesce   = mvneta_ethtool_get_coalesce,
        .get_drvinfo    = mvneta_ethtool_get_drvinfo,
        .get_ringparam  = mvneta_ethtool_get_ringparam,
        .set_ringparam  = mvneta_ethtool_set_ringparam,
+       .get_pauseparam = mvneta_ethtool_get_pauseparam,
+       .set_pauseparam = mvneta_ethtool_set_pauseparam,
        .get_strings    = mvneta_ethtool_get_strings,
        .get_ethtool_stats = mvneta_ethtool_get_stats,
        .get_sset_count = mvneta_ethtool_get_sset_count,
@@ -3984,10 +4159,14 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
        .get_rxnfc      = mvneta_ethtool_get_rxnfc,
        .get_rxfh       = mvneta_ethtool_get_rxfh,
        .set_rxfh       = mvneta_ethtool_set_rxfh,
-       .get_link_ksettings = phy_ethtool_get_link_ksettings,
+       .get_link_ksettings = mvneta_ethtool_get_link_ksettings,
        .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
        .get_wol        = mvneta_ethtool_get_wol,
        .set_wol        = mvneta_ethtool_set_wol,
+       .get_module_info = mvneta_ethtool_get_module_info,
+       .get_module_eeprom = mvneta_ethtool_get_module_eeprom,
+       .get_eee        = mvneta_ethtool_get_eee,
+       .set_eee        = mvneta_ethtool_set_eee,
 };
 
 /* Initialize hw */
@@ -4087,42 +4266,16 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
 /* Power up the port */
 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 {
-       u32 ctrl;
-
        /* MAC Cause register should be cleared */
        mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 
-       ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
-       /* Even though it might look weird, when we're configured in
-        * SGMII or QSGMII mode, the RGMII bit needs to be set.
-        */
-       switch(phy_mode) {
-       case PHY_INTERFACE_MODE_QSGMII:
+       if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
                mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
-               ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
-               break;
-       case PHY_INTERFACE_MODE_SGMII:
+       else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+                phy_mode == PHY_INTERFACE_MODE_1000BASEX)
                mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-               ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
-               break;
-       case PHY_INTERFACE_MODE_RGMII:
-       case PHY_INTERFACE_MODE_RGMII_ID:
-       case PHY_INTERFACE_MODE_RGMII_RXID:
-       case PHY_INTERFACE_MODE_RGMII_TXID:
-               ctrl |= MVNETA_GMAC2_PORT_RGMII;
-               break;
-       default:
+       else if (!phy_interface_mode_is_rgmii(phy_mode))
                return -EINVAL;
-       }
-
-       /* Cancel Port Reset */
-       ctrl &= ~MVNETA_GMAC2_PORT_RESET;
-       mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
-
-       while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
-               MVNETA_GMAC2_PORT_RESET) != 0)
-               continue;
 
        return 0;
 }
@@ -4132,14 +4285,13 @@ static int mvneta_probe(struct platform_device *pdev)
 {
        struct resource *res;
        struct device_node *dn = pdev->dev.of_node;
-       struct device_node *phy_node;
        struct device_node *bm_node;
        struct mvneta_port *pp;
        struct net_device *dev;
+       struct phylink *phylink;
        const char *dt_mac_addr;
        char hw_mac_addr[ETH_ALEN];
        const char *mac_from;
-       const char *managed;
        int tx_csum_limit;
        int phy_mode;
        int err;
@@ -4155,31 +4307,18 @@ static int mvneta_probe(struct platform_device *pdev)
                goto err_free_netdev;
        }
 
-       phy_node = of_parse_phandle(dn, "phy", 0);
-       if (!phy_node) {
-               if (!of_phy_is_fixed_link(dn)) {
-                       dev_err(&pdev->dev, "no PHY specified\n");
-                       err = -ENODEV;
-                       goto err_free_irq;
-               }
-
-               err = of_phy_register_fixed_link(dn);
-               if (err < 0) {
-                       dev_err(&pdev->dev, "cannot register fixed PHY\n");
-                       goto err_free_irq;
-               }
-
-               /* In the case of a fixed PHY, the DT node associated
-                * to the PHY is the Ethernet MAC DT node.
-                */
-               phy_node = of_node_get(dn);
-       }
-
        phy_mode = of_get_phy_mode(dn);
        if (phy_mode < 0) {
                dev_err(&pdev->dev, "incorrect phy-mode\n");
                err = -EINVAL;
-               goto err_put_phy_node;
+               goto err_free_irq;
+       }
+
+       phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
+                                &mvneta_phylink_ops);
+       if (IS_ERR(phylink)) {
+               err = PTR_ERR(phylink);
+               goto err_free_irq;
        }
 
        dev->tx_queue_len = MVNETA_MAX_TXD;
@@ -4190,12 +4329,9 @@ static int mvneta_probe(struct platform_device *pdev)
 
        pp = netdev_priv(dev);
        spin_lock_init(&pp->lock);
-       pp->phy_node = phy_node;
+       pp->phylink = phylink;
        pp->phy_interface = phy_mode;
-
-       err = of_property_read_string(dn, "managed", &managed);
-       pp->use_inband_status = (err == 0 &&
-                                strcmp(managed, "in-band-status") == 0);
+       pp->dn = dn;
 
        pp->rxq_def = rxq_def;
 
@@ -4217,7 +4353,7 @@ static int mvneta_probe(struct platform_device *pdev)
                pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
                err = PTR_ERR(pp->clk);
-               goto err_put_phy_node;
+               goto err_free_phylink;
        }
 
        clk_prepare_enable(pp->clk);
@@ -4354,14 +4490,6 @@ static int mvneta_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, pp->dev);
 
-       if (pp->use_inband_status) {
-               struct phy_device *phy = of_phy_find_device(dn);
-
-               mvneta_fixed_link_update(pp, phy);
-
-               put_device(&phy->mdio.dev);
-       }
-
        return 0;
 
 err_netdev:
@@ -4378,10 +4506,9 @@ err_free_ports:
 err_clk:
        clk_disable_unprepare(pp->clk_bus);
        clk_disable_unprepare(pp->clk);
-err_put_phy_node:
-       of_node_put(phy_node);
-       if (of_phy_is_fixed_link(dn))
-               of_phy_deregister_fixed_link(dn);
+err_free_phylink:
+       if (pp->phylink)
+               phylink_destroy(pp->phylink);
 err_free_irq:
        irq_dispose_mapping(dev->irq);
 err_free_netdev:
@@ -4393,7 +4520,6 @@ err_free_netdev:
 static int mvneta_remove(struct platform_device *pdev)
 {
        struct net_device  *dev = platform_get_drvdata(pdev);
-       struct device_node *dn = pdev->dev.of_node;
        struct mvneta_port *pp = netdev_priv(dev);
 
        unregister_netdev(dev);
@@ -4401,10 +4527,8 @@ static int mvneta_remove(struct platform_device *pdev)
        clk_disable_unprepare(pp->clk);
        free_percpu(pp->ports);
        free_percpu(pp->stats);
-       if (of_phy_is_fixed_link(dn))
-               of_phy_deregister_fixed_link(dn);
        irq_dispose_mapping(dev->irq);
-       of_node_put(pp->phy_node);
+       phylink_destroy(pp->phylink);
        free_netdev(dev);
 
        if (pp->bm_priv) {
@@ -4422,8 +4546,10 @@ static int mvneta_suspend(struct device *device)
        struct net_device *dev = dev_get_drvdata(device);
        struct mvneta_port *pp = netdev_priv(dev);
 
+       rtnl_lock();
        if (netif_running(dev))
                mvneta_stop(dev);
+       rtnl_unlock();
        netif_device_detach(dev);
        clk_disable_unprepare(pp->clk_bus);
        clk_disable_unprepare(pp->clk);
@@ -4456,14 +4582,13 @@ static int mvneta_resume(struct device *device)
                return err;
        }
 
-       if (pp->use_inband_status)
-               mvneta_fixed_link_update(pp, dev->phydev);
-
        netif_device_attach(dev);
+       rtnl_lock();
        if (netif_running(dev)) {
                mvneta_open(dev);
                mvneta_set_rx_mode(dev);
        }
+       rtnl_unlock();
 
        return 0;
 }
index 9efe177..9fe8530 100644 (file)
@@ -4287,7 +4287,7 @@ static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
                        dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
                        return -ETIMEDOUT;
                }
-               mdelay(1);
+               msleep(1);
        }
 
        return 0;
index 54adfd9..29826dd 100644 (file)
@@ -1952,20 +1952,23 @@ static int mtk_hw_init(struct mtk_eth *eth)
        }
        regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 
-       /* Set GE2 driving and slew rate */
-       regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+       if (eth->pctl) {
+               /* Set GE2 driving and slew rate */
+               regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
 
-       /* set GE2 TDSEL */
-       regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+               /* set GE2 TDSEL */
+               regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
 
-       /* set GE2 TUNE */
-       regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
-
-       /* GE1, Force 1000M/FD, FC ON */
-       mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
+               /* set GE2 TUNE */
+               regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+       }
 
-       /* GE2, Force 1000M/FD, FC ON */
-       mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+       /* Set link-down as the default for each GMAC. Each MAC's own MCR is
+        * set up with the appropriate value when mtk_phy_link_adjust() is
+        * invoked.
+        */
+       for (i = 0; i < MTK_MAC_COUNT; i++)
+               mtk_w32(eth, 0, MTK_MAC_MCR(i));
 
        /* Indicates CDM to parse the MTK special tag from CPU
         * which also is working out for untag packets.
@@ -2537,11 +2540,13 @@ static int mtk_probe(struct platform_device *pdev)
                }
        }
 
-       eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-                                                   "mediatek,pctl");
-       if (IS_ERR(eth->pctl)) {
-               dev_err(&pdev->dev, "no pctl regmap found\n");
-               return PTR_ERR(eth->pctl);
+       if (eth->soc->required_pctl) {
+               eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                           "mediatek,pctl");
+               if (IS_ERR(eth->pctl)) {
+                       dev_err(&pdev->dev, "no pctl regmap found\n");
+                       return PTR_ERR(eth->pctl);
+               }
        }
 
        for (i = 0; i < 3; i++) {
@@ -2667,17 +2672,20 @@ static int mtk_remove(struct platform_device *pdev)
 
 static const struct mtk_soc_data mt2701_data = {
        .caps = MTK_GMAC1_TRGMII,
-       .required_clks = MT7623_CLKS_BITMAP
+       .required_clks = MT7623_CLKS_BITMAP,
+       .required_pctl = true,
 };
 
 static const struct mtk_soc_data mt7622_data = {
        .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
-       .required_clks = MT7622_CLKS_BITMAP
+       .required_clks = MT7622_CLKS_BITMAP,
+       .required_pctl = false,
 };
 
 static const struct mtk_soc_data mt7623_data = {
        .caps = MTK_GMAC1_TRGMII,
-       .required_clks = MT7623_CLKS_BITMAP
+       .required_clks = MT7623_CLKS_BITMAP,
+       .required_pctl = true,
 };
 
 const struct of_device_id of_mtk_match[] = {
index a3af466..672b8c3 100644 (file)
@@ -573,10 +573,13 @@ struct mtk_rx_ring {
  * @caps                       Flags shown the extra capability for the SoC
  * @required_clks              Flags shown the bitmap for required clocks on
  *                             the target SoC
+ * @required_pctl              A bool value to show whether the SoC requires
+ *                             the extra setup for those pins used by GMAC.
  */
 struct mtk_soc_data {
        u32             caps;
        u32             required_clks;
+       bool            required_pctl;
 };
 
 /* currently no SoC has more than 2 macs */
index 5f41dc9..1a0c3bf 100644 (file)
@@ -310,6 +310,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
                }
 
                switch (ets->tc_tsa[i]) {
+               case IEEE_8021QAZ_TSA_VENDOR:
                case IEEE_8021QAZ_TSA_STRICT:
                        break;
                case IEEE_8021QAZ_TSA_ETS:
@@ -347,6 +348,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
        /* higher TC means higher priority => lower pg */
        for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
                switch (ets->tc_tsa[i]) {
+               case IEEE_8021QAZ_TSA_VENDOR:
+                       pg[i] = MLX4_EN_TC_VENDOR;
+                       tc_tx_bw[i] = MLX4_EN_BW_MAX;
+                       break;
                case IEEE_8021QAZ_TSA_STRICT:
                        pg[i] = num_strict++;
                        tc_tx_bw[i] = MLX4_EN_BW_MAX;
index 99051a2..8fc51bc 100644 (file)
@@ -2172,8 +2172,9 @@ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
                if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
                                           prof->rx_ring_size, priv->stride,
-                                          node))
+                                          node, i))
                        goto err;
+
        }
 
 #ifdef CONFIG_RFS_ACCEL
@@ -3336,6 +3337,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
        if (!mlx4_is_slave(priv->mdev->dev)) {
+               u8 prio;
+
+               for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
+                       priv->ets.prio_tc[prio] = prio;
+                       priv->ets.tc_tsa[prio]  = IEEE_8021QAZ_TSA_VENDOR;
+               }
+
                priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
                        DCB_CAP_DCBX_VER_IEEE;
                priv->flags |= MLX4_EN_DCB_ENABLED;
index 85e28ef..b4d144e 100644 (file)
@@ -262,7 +262,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_rx_ring **pring,
-                          u32 size, u16 stride, int node)
+                          u32 size, u16 stride, int node, int queue_index)
 {
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rx_ring *ring;
@@ -286,6 +286,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
        ring->log_stride = ffs(ring->stride) - 1;
        ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
 
+       if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
+               goto err_ring;
+
        tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
                                        sizeof(struct mlx4_en_rx_alloc));
        ring->rx_info = vzalloc_node(tmp, node);
@@ -293,7 +296,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
                ring->rx_info = vzalloc(tmp);
                if (!ring->rx_info) {
                        err = -ENOMEM;
-                       goto err_ring;
+                       goto err_xdp_info;
                }
        }
 
@@ -317,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 err_info:
        vfree(ring->rx_info);
        ring->rx_info = NULL;
+err_xdp_info:
+       xdp_rxq_info_unreg(&ring->xdp_rxq);
 err_ring:
        kfree(ring);
        *pring = NULL;
@@ -440,6 +445,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
                                        lockdep_is_held(&mdev->state_lock));
        if (old_prog)
                bpf_prog_put(old_prog);
+       xdp_rxq_info_unreg(&ring->xdp_rxq);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
        vfree(ring->rx_info);
        ring->rx_info = NULL;
@@ -617,6 +623,10 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
        return 0;
 }
 #endif
+
+/* We reach this function only after checking that any of
+ * the (IPv4 | IPv6) bits are set in cqe->status.
+ */
 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
                      netdev_features_t dev_features)
 {
@@ -632,13 +642,11 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
                hdr += sizeof(struct vlan_hdr);
        }
 
-       if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
-               return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 #if IS_ENABLED(CONFIG_IPV6)
        if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
                return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
 #endif
-       return 0;
+       return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 }
 
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
@@ -650,6 +658,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        int cq_ring = cq->ring;
        bool doorbell_pending;
        struct mlx4_cqe *cqe;
+       struct xdp_buff xdp;
        int polled = 0;
        int index;
 
@@ -664,6 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
        rcu_read_lock();
        xdp_prog = rcu_dereference(ring->xdp_prog);
+       xdp.rxq = &ring->xdp_rxq;
        doorbell_pending = 0;
 
        /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -748,7 +758,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                 * read bytes but not past the end of the frag.
                 */
                if (xdp_prog) {
-                       struct xdp_buff xdp;
                        dma_addr_t dma;
                        void *orig_data;
                        u32 act;
@@ -814,33 +823,33 @@ xdp_drop_no_cnt:
                if (likely(dev->features & NETIF_F_RXCSUM)) {
                        if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
                                                      MLX4_CQE_STATUS_UDP)) {
-                               if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
-                                   cqe->checksum == cpu_to_be16(0xffff)) {
-                                       bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
-                                               (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
-
-                                       ip_summed = CHECKSUM_UNNECESSARY;
-                                       hash_type = PKT_HASH_TYPE_L4;
-                                       if (l2_tunnel)
-                                               skb->csum_level = 1;
-                                       ring->csum_ok++;
-                               } else {
+                               bool l2_tunnel;
+
+                               if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+                                     cqe->checksum == cpu_to_be16(0xffff)))
                                        goto csum_none;
-                               }
+
+                               l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+                                       (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+                               ip_summed = CHECKSUM_UNNECESSARY;
+                               hash_type = PKT_HASH_TYPE_L4;
+                               if (l2_tunnel)
+                                       skb->csum_level = 1;
+                               ring->csum_ok++;
                        } else {
-                               if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
-                                   (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
-                                                              MLX4_CQE_STATUS_IPV6))) {
-                                       if (check_csum(cqe, skb, va, dev->features)) {
-                                               goto csum_none;
-                                       } else {
-                                               ip_summed = CHECKSUM_COMPLETE;
-                                               hash_type = PKT_HASH_TYPE_L3;
-                                               ring->csum_complete++;
-                                       }
-                               } else {
+                               if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
+                                     (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+#if IS_ENABLED(CONFIG_IPV6)
+                                                                MLX4_CQE_STATUS_IPV6))))
+#else
+                                                                0))))
+#endif
                                        goto csum_none;
-                               }
+                               if (check_csum(cqe, skb, va, dev->features))
+                                       goto csum_none;
+                               ip_summed = CHECKSUM_COMPLETE;
+                               hash_type = PKT_HASH_TYPE_L3;
+                               ring->csum_complete++;
                        }
                } else {
 csum_none:
index 2b72677..f470ae3 100644 (file)
@@ -46,6 +46,7 @@
 #endif
 #include <linux/cpu_rmap.h>
 #include <linux/ptp_clock_kernel.h>
+#include <net/xdp.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/qp.h>
@@ -356,6 +357,7 @@ struct mlx4_en_rx_ring {
        unsigned long dropped;
        int hwtstamp_rx_filter;
        cpumask_var_t affinity_mask;
+       struct xdp_rxq_info xdp_rxq;
 };
 
 struct mlx4_en_cq {
@@ -479,6 +481,7 @@ struct mlx4_en_frag_info {
 #define MLX4_EN_BW_MIN 1
 #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
 
+#define MLX4_EN_TC_VENDOR 0
 #define MLX4_EN_TC_ETS 7
 
 enum dcb_pfc_type {
@@ -719,7 +722,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
 void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_rx_ring **pring,
-                          u32 size, u16 stride, int node);
+                          u32 size, u16 stride, int node, int queue_index);
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_rx_ring **pring,
                             u32 size, u16 stride);
index c7c0764..2e84f10 100644 (file)
@@ -1103,30 +1103,16 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
 void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
                    u32 *lkey, u32 *rkey)
 {
-       struct mlx4_cmd_mailbox *mailbox;
-       int err;
-
        if (!fmr->maps)
                return;
 
-       fmr->maps = 0;
+       /* To unmap: it is sufficient to take back ownership from HW */
+       *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
 
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox)) {
-               err = PTR_ERR(mailbox);
-               pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
-               return;
-       }
+       /* Make sure MPT status is visible */
+       wmb();
 
-       err = mlx4_HW2SW_MPT(dev, NULL,
-                            key_to_hw_index(fmr->mr.key) &
-                            (dev->caps.num_mpts - 1));
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       if (err) {
-               pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
-               return;
-       }
-       fmr->mr.enabled = MLX4_MPT_EN_SW;
+       fmr->maps = 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
 
@@ -1136,6 +1122,22 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 
        if (fmr->maps)
                return -EBUSY;
+       if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
+               /* In case the FMR was enabled and unmapped,
+                * make sure to give ownership of the MPT back to HW
+                * so the HW2SW_MPT command will succeed.
+                */
+               *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
+               /* Make sure MPT status is visible before changing MPT fields */
+               wmb();
+               fmr->mpt->length = 0;
+               fmr->mpt->start  = 0;
+               /* Make sure MPT data is visible after changing MPT status */
+               wmb();
+               *(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
+               /* make sure MPT status is visible */
+               wmb();
+       }
 
        ret = mlx4_mr_free(dev, &fmr->mr);
        if (ret)
index 1fffdeb..e9a1fbc 100644 (file)
@@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
-       case MLX5_CMD_OP_SET_RATE_LIMIT:
+       case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
        case MLX5_CMD_OP_QUERY_RATE_LIMIT:
        case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
@@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
-       MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+       MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
index c0872b3..5299310 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/mlx5/transobj.h>
 #include <linux/rhashtable.h>
 #include <net/switchdev.h>
+#include <net/xdp.h>
 #include "wq.h"
 #include "mlx5_core.h"
 #include "en_stats.h"
@@ -82,6 +83,9 @@
        max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
 #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
 #define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
+#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
+       (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
+       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
 
 #define MLX5_MPWRQ_LOG_WQE_SZ                  18
 #define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -568,6 +572,9 @@ struct mlx5e_rq {
        u32                    rqn;
        struct mlx5_core_dev  *mdev;
        struct mlx5_core_mkey  umr_mkey;
+
+       /* XDP read-mostly */
+       struct xdp_rxq_info    xdp_rxq;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_channel {
@@ -590,6 +597,7 @@ struct mlx5e_channel {
        struct mlx5_core_dev      *mdev;
        struct hwtstamp_config    *tstamp;
        int                        ix;
+       int                        cpu;
 };
 
 struct mlx5e_channels {
@@ -935,8 +943,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
-                             struct mlx5e_params *params, u8 rq_type);
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+                              struct mlx5e_params *params,
+                              u8 rq_type);
 
 static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
 {
index c6d90b6..9bcf38f 100644 (file)
@@ -274,6 +274,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
                                    struct ieee_ets *ets)
 {
+       bool have_ets_tc = false;
        int bw_sum = 0;
        int i;
 
@@ -288,11 +289,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
        }
 
        /* Validate Bandwidth Sum */
-       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+                       have_ets_tc = true;
                        bw_sum += ets->tc_tx_bw[i];
+               }
+       }
 
-       if (bw_sum != 0 && bw_sum != 100) {
+       if (have_ets_tc && bw_sum != 100) {
                netdev_err(netdev,
                           "Failed to validate ETS: BW sum is illegal\n");
                return -EINVAL;
index 23425f0..8f05efa 100644 (file)
@@ -1523,8 +1523,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
        new_channels.params = priv->channels.params;
        MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
 
-       mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
-                                new_channels.params.rq_wq_type);
+       new_channels.params.mpwqe_log_stride_sz =
+               MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
+       new_channels.params.mpwqe_log_num_strides =
+               MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->channels.params = new_channels.params;
@@ -1536,6 +1538,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
                return err;
 
        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+       mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
+                 MLX5E_GET_PFLAG(&priv->channels.params,
+                                 MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
+
        return 0;
 }
 
index 0f5c012..539bd1d 100644 (file)
@@ -71,11 +71,6 @@ struct mlx5e_channel_param {
        struct mlx5e_cq_param      icosq_cq;
 };
 
-static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
-{
-       return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
-}
-
 static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
        return MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -83,8 +78,8 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
                MLX5_CAP_ETH(mdev, reg_umr_sq);
 }
 
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
-                             struct mlx5e_params *params, u8 rq_type)
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+                              struct mlx5e_params *params, u8 rq_type)
 {
        params->rq_wq_type = rq_type;
        params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -93,10 +88,8 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
                params->log_rq_size = is_kdump_kernel() ?
                        MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
                        MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
-               params->mpwqe_log_stride_sz =
-                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
-                       MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
-                       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+               params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
+                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
                params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
                        params->mpwqe_log_stride_sz;
                break;
@@ -120,13 +113,14 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
                       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
 
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
+                               struct mlx5e_params *params)
 {
        u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
                    !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
                    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                    MLX5_WQ_TYPE_LINKED_LIST;
-       mlx5e_set_rq_type_params(mdev, params, rq_type);
+       mlx5e_init_rq_type_params(mdev, params, rq_type);
 }
 
 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -444,17 +438,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
        int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
        int mtt_sz = mlx5e_get_wqe_mtt_sz();
        int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
-       int node = mlx5e_get_node(c->priv, c->ix);
        int i;
 
        rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
-                                       GFP_KERNEL, node);
+                                     GFP_KERNEL, cpu_to_node(c->cpu));
        if (!rq->mpwqe.info)
                goto err_out;
 
        /* We allocate more than mtt_sz as we will align the pointer */
-       rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz,
-                                       GFP_KERNEL, node);
+       rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+                                       cpu_to_node(c->cpu));
        if (unlikely(!rq->mpwqe.mtt_no_align))
                goto err_free_wqe_info;
 
@@ -562,7 +555,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        int err;
        int i;
 
-       rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       rqp->wq.db_numa_node = cpu_to_node(c->cpu);
 
        err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
@@ -589,6 +582,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                goto err_rq_wq_destroy;
        }
 
+       if (xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix) < 0)
+               goto err_rq_wq_destroy;
+
        rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
        rq->buff.headroom = params->rq_headroom;
 
@@ -629,8 +625,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                rq->wqe.frag_info =
                        kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
-                                    GFP_KERNEL,
-                                    mlx5e_get_node(c->priv, c->ix));
+                                    GFP_KERNEL, cpu_to_node(c->cpu));
                if (!rq->wqe.frag_info) {
                        err = -ENOMEM;
                        goto err_rq_wq_destroy;
@@ -695,6 +690,7 @@ err_destroy_umr_mkey:
 err_rq_wq_destroy:
        if (rq->xdp_prog)
                bpf_prog_put(rq->xdp_prog);
+       xdp_rxq_info_unreg(&rq->xdp_rxq);
        mlx5_wq_destroy(&rq->wq_ctrl);
 
        return err;
@@ -707,6 +703,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
        if (rq->xdp_prog)
                bpf_prog_put(rq->xdp_prog);
 
+       xdp_rxq_info_unreg(&rq->xdp_rxq);
+
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                mlx5e_rq_free_mpwqe_info(rq);
@@ -1000,13 +998,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1053,13 +1051,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1126,13 +1124,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
 
-       param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
        if (err)
                return err;
        sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];
 
-       err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+       err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
@@ -1504,8 +1502,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
        struct mlx5_core_dev *mdev = c->priv->mdev;
        int err;
 
-       param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix);
-       param->wq.db_numa_node  = mlx5e_get_node(c->priv, c->ix);
+       param->wq.buf_numa_node = cpu_to_node(c->cpu);
+       param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix   = c->ix;
 
        err = mlx5e_alloc_cq_common(mdev, param, cq);
@@ -1604,6 +1602,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
        mlx5e_free_cq(cq);
 }
 
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+       return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
                             struct mlx5e_channel_param *cparam)
@@ -1752,12 +1755,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 {
        struct mlx5e_cq_moder icocq_moder = {0, 0};
        struct net_device *netdev = priv->netdev;
+       int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        unsigned int irq;
        int err;
        int eqn;
 
-       c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix));
+       c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;
 
@@ -1765,6 +1769,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        c->mdev     = priv->mdev;
        c->tstamp   = &priv->tstamp;
        c->ix       = ix;
+       c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
@@ -1853,8 +1858,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_activate_txqsq(&c->sq[tc]);
        mlx5e_activate_rq(&c->rq);
-       netif_set_xps_queue(c->netdev,
-               mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
+       netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
 }
 
 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2768,6 +2772,9 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
        if (err)
                return err;
 
+       /* Mark as unused given "Drop-RQ" packets never reach XDP */
+       xdp_rxq_info_unused(&rq->xdp_rxq);
+
        rq->mdev = mdev;
 
        return 0;
@@ -3679,6 +3686,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                                                     struct sk_buff *skb,
                                                     netdev_features_t features)
 {
+       unsigned int offset = 0;
        struct udphdr *udph;
        u8 proto;
        u16 port;
@@ -3688,7 +3696,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                proto = ip_hdr(skb)->protocol;
                break;
        case htons(ETH_P_IPV6):
-               proto = ipv6_hdr(skb)->nexthdr;
+               proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
                break;
        default:
                goto out;
index 2c43606..c6a77f8 100644 (file)
@@ -190,6 +190,63 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
        return 0;
 }
 
+static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
+                                struct mlx5_eswitch_rep *rep)
+{
+       struct mlx5e_rep_sq *rep_sq, *tmp;
+       struct mlx5e_rep_priv *rpriv;
+
+       if (esw->mode != SRIOV_OFFLOADS)
+               return;
+
+       rpriv = mlx5e_rep_to_rep_priv(rep);
+       list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
+               mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+               list_del(&rep_sq->list);
+               kfree(rep_sq);
+       }
+}
+
+static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
+                                struct mlx5_eswitch_rep *rep,
+                                u16 *sqns_array, int sqns_num)
+{
+       struct mlx5_flow_handle *flow_rule;
+       struct mlx5e_rep_priv *rpriv;
+       struct mlx5e_rep_sq *rep_sq;
+       int err;
+       int i;
+
+       if (esw->mode != SRIOV_OFFLOADS)
+               return 0;
+
+       rpriv = mlx5e_rep_to_rep_priv(rep);
+       for (i = 0; i < sqns_num; i++) {
+               rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
+               if (!rep_sq) {
+                       err = -ENOMEM;
+                       goto out_err;
+               }
+
+               /* Add re-inject rule to the PF/representor sqs */
+               flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+                                                               rep->vport,
+                                                               sqns_array[i]);
+               if (IS_ERR(flow_rule)) {
+                       err = PTR_ERR(flow_rule);
+                       kfree(rep_sq);
+                       goto out_err;
+               }
+               rep_sq->send_to_vport_rule = flow_rule;
+               list_add(&rep_sq->list, &rpriv->vport_sqs_list);
+       }
+       return 0;
+
+out_err:
+       mlx5e_sqs2vport_stop(esw, rep);
+       return err;
+}
+
 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -210,7 +267,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
                        sqs[num_sqs++] = c->sq[tc].sqn;
        }
 
-       err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+       err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
        kfree(sqs);
 
 out:
@@ -225,7 +282,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
 
-       mlx5_eswitch_sqs2vport_stop(esw, rep);
+       mlx5e_sqs2vport_stop(esw, rep);
 }
 
 static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
@@ -238,7 +295,7 @@ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
 #endif
        unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
                                                DELAY_PROBE_TIME);
-       struct net_device *netdev = rpriv->rep->netdev;
+       struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
        rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
@@ -259,7 +316,7 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
 {
        struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
                                                    neigh_update.neigh_stats_work.work);
-       struct net_device *netdev = rpriv->rep->netdev;
+       struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe;
 
@@ -355,7 +412,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    neigh_update.netevent_nb);
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-       struct net_device *netdev = rpriv->rep->netdev;
+       struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe = NULL;
        struct mlx5e_neigh m_neigh = {};
@@ -483,7 +540,7 @@ out_err:
 static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
 {
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-       struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);
+       struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
        unregister_netevent_notifier(&neigh_update->netevent_nb);
 
@@ -904,7 +961,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
                err = PTR_ERR(flow_rule);
                goto err_destroy_direct_tirs;
        }
-       rep->vport_rx_rule = flow_rule;
+       rpriv->vport_rx_rule = flow_rule;
 
        err = mlx5e_tc_init(priv);
        if (err)
@@ -913,7 +970,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
        return 0;
 
 err_del_flow_rule:
-       mlx5_del_flow_rules(rep->vport_rx_rule);
+       mlx5_del_flow_rules(rpriv->vport_rx_rule);
 err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
 err_destroy_direct_rqts:
@@ -924,10 +981,9 @@ err_destroy_direct_rqts:
 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 {
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
-       struct mlx5_eswitch_rep *rep = rpriv->rep;
 
        mlx5e_tc_cleanup(priv);
-       mlx5_del_flow_rules(rep->vport_rx_rule);
+       mlx5_del_flow_rules(rpriv->vport_rx_rule);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_direct_rqts(priv);
 }
@@ -967,10 +1023,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
 /* e-Switch vport representors */
 
 static int
-mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
-       struct mlx5e_priv *priv = netdev_priv(rep->netdev);
-       struct mlx5e_rep_priv *rpriv = priv->ppriv;
+       struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+       struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
        int err;
 
@@ -992,10 +1048,10 @@ err_remove_sqs:
 }
 
 static void
-mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-       struct mlx5e_priv *priv = netdev_priv(rep->netdev);
-       struct mlx5e_rep_priv *rpriv = priv->ppriv;
+       struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+       struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_remove_sqs_fwd_rules(priv);
@@ -1008,8 +1064,9 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 }
 
 static int
-mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
+       struct mlx5e_rep_priv *uplink_rpriv;
        struct mlx5e_rep_priv *rpriv;
        struct net_device *netdev;
        struct mlx5e_priv *upriv;
@@ -1019,7 +1076,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
        if (!rpriv)
                return -ENOMEM;
 
-       netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
+       netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
        if (!netdev) {
                pr_warn("Failed to create representor netdev for vport %d\n",
                        rep->vport);
@@ -1027,8 +1084,10 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
                return -EINVAL;
        }
 
-       rep->netdev = netdev;
+       rpriv->netdev = netdev;
        rpriv->rep = rep;
+       rep->rep_if[REP_ETH].priv = rpriv;
+       INIT_LIST_HEAD(&rpriv->vport_sqs_list);
 
        err = mlx5e_attach_netdev(netdev_priv(netdev));
        if (err) {
@@ -1044,7 +1103,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
                goto err_detach_netdev;
        }
 
-       upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+       uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
+       upriv = netdev_priv(uplink_rpriv->netdev);
        err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
                                         upriv);
        if (err)
@@ -1076,16 +1136,19 @@ err_destroy_netdev:
 }
 
 static void
-mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-       struct net_device *netdev = rep->netdev;
+       struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+       struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5e_rep_priv *rpriv = priv->ppriv;
+       struct mlx5e_rep_priv *uplink_rpriv;
        void *ppriv = priv->ppriv;
        struct mlx5e_priv *upriv;
 
-       unregister_netdev(rep->netdev);
-       upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+       unregister_netdev(netdev);
+       uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
+                                                   REP_ETH);
+       upriv = netdev_priv(uplink_rpriv->netdev);
        tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
                                     upriv);
        mlx5e_rep_neigh_cleanup(rpriv);
@@ -1100,18 +1163,13 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
        struct mlx5_eswitch *esw   = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;
-       u8 mac[ETH_ALEN];
-
-       mlx5_query_nic_vport_mac_address(mdev, 0, mac);
 
        for (vport = 1; vport < total_vfs; vport++) {
-               struct mlx5_eswitch_rep rep;
+               struct mlx5_eswitch_rep_if rep_if = {};
 
-               rep.load = mlx5e_vport_rep_load;
-               rep.unload = mlx5e_vport_rep_unload;
-               rep.vport = vport;
-               ether_addr_copy(rep.hw_id, mac);
-               mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+               rep_if.load = mlx5e_vport_rep_load;
+               rep_if.unload = mlx5e_vport_rep_unload;
+               mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
        }
 }
 
@@ -1123,21 +1181,24 @@ static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
        int vport;
 
        for (vport = 1; vport < total_vfs; vport++)
-               mlx5_eswitch_unregister_vport_rep(esw, vport);
+               mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
 }
 
 void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw   = mdev->priv.eswitch;
-       struct mlx5_eswitch_rep rep;
+       struct mlx5_eswitch_rep_if rep_if;
+       struct mlx5e_rep_priv *rpriv;
+
+       rpriv = priv->ppriv;
+       rpriv->netdev = priv->netdev;
 
-       mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
-       rep.load = mlx5e_nic_rep_load;
-       rep.unload = mlx5e_nic_rep_unload;
-       rep.vport = FDB_UPLINK_VPORT;
-       rep.netdev = priv->netdev;
-       mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/
+       rep_if.load = mlx5e_nic_rep_load;
+       rep_if.unload = mlx5e_nic_rep_unload;
+       rep_if.priv = rpriv;
+       INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+       mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/
 
        mlx5e_rep_register_vf_vports(priv); /* VFs vports */
 }
@@ -1148,7 +1209,7 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
        struct mlx5_eswitch *esw   = mdev->priv.eswitch;
 
        mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
-       mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/
+       mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF*/
 }
 
 void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
index 5659ed9..b9b481f 100644 (file)
@@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table {
 struct mlx5e_rep_priv {
        struct mlx5_eswitch_rep *rep;
        struct mlx5e_neigh_update_table neigh_update;
+       struct net_device      *netdev;
+       struct mlx5_flow_handle *vport_rx_rule;
+       struct list_head       vport_sqs_list;
 };
 
+static inline
+struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
+{
+       return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+}
+
 struct mlx5e_neigh {
        struct net_device *dev;
        union {
@@ -124,6 +133,11 @@ struct mlx5e_encap_entry {
        int encap_size;
 };
 
+struct mlx5e_rep_sq {
+       struct mlx5_flow_handle *send_to_vport_rule;
+       struct list_head         list;
+};
+
 void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
 void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
 void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
index 5b499c7..7b38480 100644 (file)
@@ -812,6 +812,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
        xdp_set_data_meta_invalid(&xdp);
        xdp.data_end = xdp.data + *len;
        xdp.data_hard_start = va;
+       xdp.rxq = &rq->xdp_rxq;
 
        act = bpf_prog_run_xdp(prog, &xdp);
        switch (act) {
index 3e03d2e..933275f 100644 (file)
@@ -617,7 +617,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->mask);
                struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-               struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+               struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+               struct net_device *up_dev = uplink_rpriv->netdev;
                struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 
                /* Full udp dst port must be given */
@@ -1507,6 +1508,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   int *out_ttl)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5e_rep_priv *uplink_rpriv;
        struct rtable *rt;
        struct neighbour *n = NULL;
 
@@ -1520,9 +1522,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
        return -EOPNOTSUPP;
 #endif
+       uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
        /* if the egress device isn't on the same HW e-switch, we use the uplink */
        if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
-               *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+               *out_dev = uplink_rpriv->netdev;
        else
                *out_dev = rt->dst.dev;
 
@@ -1547,6 +1550,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
        struct dst_entry *dst;
 
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+       struct mlx5e_rep_priv *uplink_rpriv;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int ret;
 
@@ -1557,9 +1561,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 
        *out_ttl = ip6_dst_hoplimit(dst);
 
+       uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
        /* if the egress device isn't on the same HW e-switch, we use the uplink */
        if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
-               *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+               *out_dev = uplink_rpriv->netdev;
        else
                *out_dev = dst->dev;
 #else
@@ -1859,7 +1864,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+       struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
+                                                                          REP_ETH);
+       struct net_device *up_dev = uplink_rpriv->netdev;
        unsigned short family = ip_tunnel_info_af(tun_info);
        struct mlx5e_priv *up_priv = netdev_priv(up_dev);
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
index 6077186..e7e7cef 100644 (file)
@@ -466,7 +466,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
                        break;
                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
-                       mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
+                       mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                                       cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;
@@ -775,7 +775,7 @@ err1:
        return err;
 }
 
-int mlx5_stop_eqs(struct mlx5_core_dev *dev)
+void mlx5_stop_eqs(struct mlx5_core_dev *dev)
 {
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;
@@ -784,22 +784,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
        if (MLX5_CAP_GEN(dev, pg)) {
                err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
                if (err)
-                       return err;
+                       mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
+                                     err);
        }
 #endif
 
        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
-               return err;
+               mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
+                             err);
 
-       mlx5_destroy_unmap_eq(dev, &table->async_eq);
+       err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
+       if (err)
+               mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
+                             err);
        mlx5_cmd_use_polling(dev);
 
        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
-               mlx5_cmd_use_events(dev);
-
-       return err;
+               mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
+                             err);
 }
 
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
index bbb140f..7649e36 100644 (file)
@@ -867,9 +867,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
                  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
 
-       root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+       root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+                                                   vport->vport);
        if (!root_ns) {
-               esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+               esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
                return -EOPNOTSUPP;
        }
 
@@ -984,9 +985,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
                  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
 
-       root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+       root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+                                                   vport->vport);
        if (!root_ns) {
-               esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+               esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
                return -EOPNOTSUPP;
        }
 
@@ -1290,7 +1292,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
 
        err = mlx5_create_scheduling_element_cmd(dev,
                                                 SCHEDULING_HIERARCHY_E_SWITCH,
-                                                &tsar_ctx,
+                                                tsar_ctx,
                                                 &esw->qos.root_tsar_id);
        if (err) {
                esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
@@ -1333,20 +1335,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
        if (vport->qos.enabled)
                return -EEXIST;
 
-       MLX5_SET(scheduling_context, &sched_ctx, element_type,
+       MLX5_SET(scheduling_context, sched_ctx, element_type,
                 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-       vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+       vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
                                  element_attributes);
        MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-       MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+       MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
                 esw->qos.root_tsar_id);
-       MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+       MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
                 initial_max_rate);
-       MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
+       MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
 
        err = mlx5_create_scheduling_element_cmd(dev,
                                                 SCHEDULING_HIERARCHY_E_SWITCH,
-                                                &sched_ctx,
+                                                sched_ctx,
                                                 &vport->qos.esw_tsar_ix);
        if (err) {
                esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
@@ -1392,22 +1394,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
        if (!vport->qos.enabled)
                return -EIO;
 
-       MLX5_SET(scheduling_context, &sched_ctx, element_type,
+       MLX5_SET(scheduling_context, sched_ctx, element_type,
                 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-       vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+       vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
                                  element_attributes);
        MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-       MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+       MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
                 esw->qos.root_tsar_id);
-       MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+       MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
                 max_rate);
-       MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
+       MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
        bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
        bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
 
        err = mlx5_modify_scheduling_element_cmd(dev,
                                                 SCHEDULING_HIERARCHY_E_SWITCH,
-                                                &sched_ctx,
+                                                sched_ctx,
                                                 vport->qos.esw_tsar_ix,
                                                 bitmask);
        if (err) {
@@ -1644,13 +1646,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
                goto abort;
        }
 
-       esw->offloads.vport_reps =
-               kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
-                       GFP_KERNEL);
-       if (!esw->offloads.vport_reps) {
-               err = -ENOMEM;
+       err = esw_offloads_init_reps(esw);
+       if (err)
                goto abort;
-       }
 
        hash_init(esw->offloads.encap_tbl);
        hash_init(esw->offloads.mod_hdr_tbl);
@@ -1681,8 +1679,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 abort:
        if (esw->work_queue)
                destroy_workqueue(esw->work_queue);
+       esw_offloads_cleanup_reps(esw);
        kfree(esw->vports);
-       kfree(esw->offloads.vport_reps);
        kfree(esw);
        return err;
 }
@@ -1696,7 +1694,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 
        esw->dev->priv.eswitch = NULL;
        destroy_workqueue(esw->work_queue);
-       kfree(esw->offloads.vport_reps);
+       esw_offloads_cleanup_reps(esw);
        kfree(esw->vports);
        kfree(esw);
 }
index 565c8b7..3b48118 100644 (file)
@@ -45,6 +45,11 @@ enum {
        SRIOV_OFFLOADS
 };
 
+enum {
+       REP_ETH,
+       NUM_REP_TYPES,
+};
+
 #ifdef CONFIG_MLX5_ESWITCH
 
 #define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -133,25 +138,21 @@ struct mlx5_eswitch_fdb {
        };
 };
 
-struct mlx5_esw_sq {
-       struct mlx5_flow_handle *send_to_vport_rule;
-       struct list_head         list;
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+       int                    (*load)(struct mlx5_core_dev *dev,
+                                      struct mlx5_eswitch_rep *rep);
+       void                   (*unload)(struct mlx5_eswitch_rep *rep);
+       void                    *priv;
+       bool                   valid;
 };
 
 struct mlx5_eswitch_rep {
-       int                    (*load)(struct mlx5_eswitch *esw,
-                                      struct mlx5_eswitch_rep *rep);
-       void                   (*unload)(struct mlx5_eswitch *esw,
-                                        struct mlx5_eswitch_rep *rep);
+       struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
        u16                    vport;
        u8                     hw_id[ETH_ALEN];
-       struct net_device      *netdev;
-
-       struct mlx5_flow_handle *vport_rx_rule;
-       struct list_head       vport_sqs_list;
        u16                    vlan;
        u32                    vlan_refcount;
-       bool                   valid;
 };
 
 struct mlx5_esw_offload {
@@ -197,6 +198,8 @@ struct mlx5_eswitch {
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
+int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -221,6 +224,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
                                 int vport,
                                 struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
+                                   u32 sqn);
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
 
 struct mlx5_flow_spec;
 struct mlx5_esw_flow_attr;
@@ -257,12 +264,6 @@ struct mlx5_esw_flow_attr {
        struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
 
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-                                struct mlx5_eswitch_rep *rep,
-                                u16 *sqns_array, int sqns_num);
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-                                struct mlx5_eswitch_rep *rep);
-
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
@@ -272,10 +273,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
                                     int vport_index,
-                                    struct mlx5_eswitch_rep *rep);
+                                    struct mlx5_eswitch_rep_if *rep_if,
+                                    u8 rep_type);
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-                                      int vport_index);
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
+                                      int vport_index,
+                                      u8 rep_type);
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr);
index 1143d80..99f583a 100644 (file)
@@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
        esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
        for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
                rep = &esw->offloads.vport_reps[vf_vport];
-               if (!rep->valid)
+               if (!rep->rep_if[REP_ETH].valid)
                        continue;
 
                err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -302,7 +302,7 @@ out:
        return err;
 }
 
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
 {
        struct mlx5_flow_act flow_act = {0};
@@ -339,57 +339,9 @@ out:
        return flow_rule;
 }
 
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-                                struct mlx5_eswitch_rep *rep)
-{
-       struct mlx5_esw_sq *esw_sq, *tmp;
-
-       if (esw->mode != SRIOV_OFFLOADS)
-               return;
-
-       list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
-               mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
-               list_del(&esw_sq->list);
-               kfree(esw_sq);
-       }
-}
-
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-                                struct mlx5_eswitch_rep *rep,
-                                u16 *sqns_array, int sqns_num)
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
 {
-       struct mlx5_flow_handle *flow_rule;
-       struct mlx5_esw_sq *esw_sq;
-       int err;
-       int i;
-
-       if (esw->mode != SRIOV_OFFLOADS)
-               return 0;
-
-       for (i = 0; i < sqns_num; i++) {
-               esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
-               if (!esw_sq) {
-                       err = -ENOMEM;
-                       goto out_err;
-               }
-
-               /* Add re-inject rule to the PF/representor sqs */
-               flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
-                                                               rep->vport,
-                                                               sqns_array[i]);
-               if (IS_ERR(flow_rule)) {
-                       err = PTR_ERR(flow_rule);
-                       kfree(esw_sq);
-                       goto out_err;
-               }
-               esw_sq->send_to_vport_rule = flow_rule;
-               list_add(&esw_sq->list, &rep->vport_sqs_list);
-       }
-       return 0;
-
-out_err:
-       mlx5_eswitch_sqs2vport_stop(esw, rep);
-       return err;
+       mlx5_del_flow_rules(rule);
 }
 
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -732,12 +684,111 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
        return err;
 }
 
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+{
+       kfree(esw->offloads.vport_reps);
+}
+
+int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+{
+       int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+       struct mlx5_core_dev *dev = esw->dev;
+       struct mlx5_esw_offload *offloads;
+       struct mlx5_eswitch_rep *rep;
+       u8 hw_id[ETH_ALEN];
+       int vport;
+
+       esw->offloads.vport_reps = kcalloc(total_vfs,
+                                          sizeof(struct mlx5_eswitch_rep),
+                                          GFP_KERNEL);
+       if (!esw->offloads.vport_reps)
+               return -ENOMEM;
+
+       offloads = &esw->offloads;
+       mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+
+       for (vport = 0; vport < total_vfs; vport++) {
+               rep = &offloads->vport_reps[vport];
+
+               rep->vport = vport;
+               ether_addr_copy(rep->hw_id, hw_id);
+       }
+
+       offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
+
+       return 0;
+}
+
+static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
+                                         u8 rep_type)
+{
+       struct mlx5_eswitch_rep *rep;
+       int vport;
+
+       for (vport = nvports - 1; vport >= 0; vport--) {
+               rep = &esw->offloads.vport_reps[vport];
+               if (!rep->rep_if[rep_type].valid)
+                       continue;
+
+               rep->rep_if[rep_type].unload(rep);
+       }
+}
+
+static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
+{
+       u8 rep_type = NUM_REP_TYPES;
+
+       while (rep_type-- > 0)
+               esw_offloads_unload_reps_type(esw, nvports, rep_type);
+}
+
+static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
+                                      u8 rep_type)
 {
        struct mlx5_eswitch_rep *rep;
        int vport;
        int err;
 
+       for (vport = 0; vport < nvports; vport++) {
+               rep = &esw->offloads.vport_reps[vport];
+               if (!rep->rep_if[rep_type].valid)
+                       continue;
+
+               err = rep->rep_if[rep_type].load(esw->dev, rep);
+               if (err)
+                       goto err_reps;
+       }
+
+       return 0;
+
+err_reps:
+       esw_offloads_unload_reps_type(esw, vport, rep_type);
+       return err;
+}
+
+static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
+{
+       u8 rep_type = 0;
+       int err;
+
+       for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+               err = esw_offloads_load_reps_type(esw, nvports, rep_type);
+               if (err)
+                       goto err_reps;
+       }
+
+       return err;
+
+err_reps:
+       while (rep_type-- > 0)
+               esw_offloads_unload_reps_type(esw, nvports, rep_type);
+       return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+       int err;
+
        /* disable PF RoCE so missed packets don't go through RoCE steering */
        mlx5_dev_list_lock();
        mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
@@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
        if (err)
                goto create_fg_err;
 
-       for (vport = 0; vport < nvports; vport++) {
-               rep = &esw->offloads.vport_reps[vport];
-               if (!rep->valid)
-                       continue;
-
-               err = rep->load(esw, rep);
-               if (err)
-                       goto err_reps;
-       }
+       err = esw_offloads_load_reps(esw, nvports);
+       if (err)
+               goto err_reps;
 
        return 0;
 
 err_reps:
-       for (vport--; vport >= 0; vport--) {
-               rep = &esw->offloads.vport_reps[vport];
-               if (!rep->valid)
-                       continue;
-               rep->unload(esw, rep);
-       }
        esw_destroy_vport_rx_group(esw);
 
 create_fg_err:
@@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
 {
-       struct mlx5_eswitch_rep *rep;
-       int vport;
-
-       for (vport = nvports - 1; vport >= 0; vport--) {
-               rep = &esw->offloads.vport_reps[vport];
-               if (!rep->valid)
-                       continue;
-               rep->unload(esw, rep);
-       }
-
+       esw_offloads_unload_reps(esw, nvports);
        esw_destroy_vport_rx_group(esw);
        esw_destroy_offloads_table(esw);
        esw_destroy_offloads_fdb_tables(esw);
@@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
                                     int vport_index,
-                                    struct mlx5_eswitch_rep *__rep)
+                                    struct mlx5_eswitch_rep_if *__rep_if,
+                                    u8 rep_type)
 {
        struct mlx5_esw_offload *offloads = &esw->offloads;
-       struct mlx5_eswitch_rep *rep;
-
-       rep = &offloads->vport_reps[vport_index];
+       struct mlx5_eswitch_rep_if *rep_if;
 
-       memset(rep, 0, sizeof(*rep));
+       rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
 
-       rep->load   = __rep->load;
-       rep->unload = __rep->unload;
-       rep->vport  = __rep->vport;
-       rep->netdev = __rep->netdev;
-       ether_addr_copy(rep->hw_id, __rep->hw_id);
+       rep_if->load   = __rep_if->load;
+       rep_if->unload = __rep_if->unload;
+       rep_if->priv = __rep_if->priv;
 
-       INIT_LIST_HEAD(&rep->vport_sqs_list);
-       rep->valid = true;
+       rep_if->valid = true;
 }
 
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-                                      int vport_index)
+                                      int vport_index, u8 rep_type)
 {
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;
@@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
        rep = &offloads->vport_reps[vport_index];
 
        if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
-               rep->unload(esw, rep);
+               rep->rep_if[rep_type].unload(rep);
 
-       rep->valid = false;
+       rep->rep_if[rep_type].valid = false;
 }
 
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
 {
 #define UPLINK_REP_INDEX 0
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;
 
        rep = &offloads->vport_reps[UPLINK_REP_INDEX];
-       return rep->netdev;
+       return rep->rep_if[rep_type].priv;
 }
index 3c11d6e..1496296 100644 (file)
@@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
        u8 actual_size;
        int err;
 
+       if (!size)
+               return -EINVAL;
+
        if (!fdev->mdev)
                return -ENOTCONN;
 
@@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
        u8 actual_size;
        int err;
 
+       if (!size)
+               return -EINVAL;
+
        if (!fdev->mdev)
                return -ENOTCONN;
 
index c70fd66..cc4f6ab 100644 (file)
@@ -174,6 +174,8 @@ static void del_hw_fte(struct fs_node *node);
 static void del_sw_flow_table(struct fs_node *node);
 static void del_sw_flow_group(struct fs_node *node);
 static void del_sw_fte(struct fs_node *node);
+static void del_sw_prio(struct fs_node *node);
+static void del_sw_ns(struct fs_node *node);
 /* Delete rule (destination) is special case that 
  * requires to lock the FTE for all the deletion process.
  */
@@ -408,6 +410,16 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
        return NULL;
 }
 
+static void del_sw_ns(struct fs_node *node)
+{
+       kfree(node);
+}
+
+static void del_sw_prio(struct fs_node *node)
+{
+       kfree(node);
+}
+
 static void del_hw_flow_table(struct fs_node *node)
 {
        struct mlx5_flow_table *ft;
@@ -2014,16 +2026,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
                        return &steering->fdb_root_ns->ns;
                else
                        return NULL;
-       case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
-               if (steering->esw_egress_root_ns)
-                       return &steering->esw_egress_root_ns->ns;
-               else
-                       return NULL;
-       case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
-               if (steering->esw_ingress_root_ns)
-                       return &steering->esw_ingress_root_ns->ns;
-               else
-                       return NULL;
        case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
                if (steering->sniffer_rx_root_ns)
                        return &steering->sniffer_rx_root_ns->ns;
@@ -2054,6 +2056,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL(mlx5_get_flow_namespace);
 
+struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+                                                             enum mlx5_flow_namespace_type type,
+                                                             int vport)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+
+       if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
+               return NULL;
+
+       switch (type) {
+       case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+               if (steering->esw_egress_root_ns &&
+                   steering->esw_egress_root_ns[vport])
+                       return &steering->esw_egress_root_ns[vport]->ns;
+               else
+                       return NULL;
+       case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+               if (steering->esw_ingress_root_ns &&
+                   steering->esw_ingress_root_ns[vport])
+                       return &steering->esw_ingress_root_ns[vport]->ns;
+               else
+                       return NULL;
+       default:
+               return NULL;
+       }
+}
+
 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
                                      unsigned int prio, int num_levels)
 {
@@ -2064,7 +2093,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
                return ERR_PTR(-ENOMEM);
 
        fs_prio->node.type = FS_TYPE_PRIO;
-       tree_init_node(&fs_prio->node, NULL, NULL);
+       tree_init_node(&fs_prio->node, NULL, del_sw_prio);
        tree_add_node(&fs_prio->node, &ns->node);
        fs_prio->num_levels = num_levels;
        fs_prio->prio = prio;
@@ -2090,7 +2119,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
                return ERR_PTR(-ENOMEM);
 
        fs_init_namespace(ns);
-       tree_init_node(&ns->node, NULL, NULL);
+       tree_init_node(&ns->node, NULL, del_sw_ns);
        tree_add_node(&ns->node, &prio->node);
        list_add_tail(&ns->node.list, &prio->node.children);
 
@@ -2331,13 +2360,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
        clean_tree(&root_ns->ns.node);
 }
 
+static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int i;
+
+       if (!steering->esw_egress_root_ns)
+               return;
+
+       for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+               cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+       kfree(steering->esw_egress_root_ns);
+}
+
+static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int i;
+
+       if (!steering->esw_ingress_root_ns)
+               return;
+
+       for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+               cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+       kfree(steering->esw_ingress_root_ns);
+}
+
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
        struct mlx5_flow_steering *steering = dev->priv.steering;
 
        cleanup_root_ns(steering->root_ns);
-       cleanup_root_ns(steering->esw_egress_root_ns);
-       cleanup_root_ns(steering->esw_ingress_root_ns);
+       cleanup_egress_acls_root_ns(dev);
+       cleanup_ingress_acls_root_ns(dev);
        cleanup_root_ns(steering->fdb_root_ns);
        cleanup_root_ns(steering->sniffer_rx_root_ns);
        cleanup_root_ns(steering->sniffer_tx_root_ns);
@@ -2406,34 +2463,86 @@ out_err:
        return PTR_ERR(prio);
 }
 
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
        struct fs_prio *prio;
 
-       steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
-       if (!steering->esw_egress_root_ns)
+       steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+       if (!steering->esw_egress_root_ns[vport])
                return -ENOMEM;
 
        /* create 1 prio*/
-       prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
-                             MLX5_TOTAL_VPORTS(steering->dev));
+       prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
        return PTR_ERR_OR_ZERO(prio);
 }
 
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
        struct fs_prio *prio;
 
-       steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
-       if (!steering->esw_ingress_root_ns)
+       steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+       if (!steering->esw_ingress_root_ns[vport])
                return -ENOMEM;
 
        /* create 1 prio*/
-       prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
-                             MLX5_TOTAL_VPORTS(steering->dev));
+       prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
        return PTR_ERR_OR_ZERO(prio);
 }
 
+static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int err;
+       int i;
+
+       steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+                                              sizeof(*steering->esw_egress_root_ns),
+                                              GFP_KERNEL);
+       if (!steering->esw_egress_root_ns)
+               return -ENOMEM;
+
+       for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+               err = init_egress_acl_root_ns(steering, i);
+               if (err)
+                       goto cleanup_root_ns;
+       }
+
+       return 0;
+
+cleanup_root_ns:
+       for (i--; i >= 0; i--)
+               cleanup_root_ns(steering->esw_egress_root_ns[i]);
+       kfree(steering->esw_egress_root_ns);
+       return err;
+}
+
+static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_steering *steering = dev->priv.steering;
+       int err;
+       int i;
+
+       steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+                                               sizeof(*steering->esw_ingress_root_ns),
+                                               GFP_KERNEL);
+       if (!steering->esw_ingress_root_ns)
+               return -ENOMEM;
+
+       for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+               err = init_ingress_acl_root_ns(steering, i);
+               if (err)
+                       goto cleanup_root_ns;
+       }
+
+       return 0;
+
+cleanup_root_ns:
+       for (i--; i >= 0; i--)
+               cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+       kfree(steering->esw_ingress_root_ns);
+       return err;
+}
+
 int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
        struct mlx5_flow_steering *steering;
@@ -2476,12 +2585,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
                                goto err;
                }
                if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-                       err = init_egress_acl_root_ns(steering);
+                       err = init_egress_acls_root_ns(dev);
                        if (err)
                                goto err;
                }
                if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-                       err = init_ingress_acl_root_ns(steering);
+                       err = init_ingress_acls_root_ns(dev);
                        if (err)
                                goto err;
                }
index 397d24a..3e57104 100644 (file)
@@ -71,8 +71,8 @@ struct mlx5_flow_steering {
        struct kmem_cache               *ftes_cache;
        struct mlx5_flow_root_namespace *root_ns;
        struct mlx5_flow_root_namespace *fdb_root_ns;
-       struct mlx5_flow_root_namespace *esw_egress_root_ns;
-       struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+       struct mlx5_flow_root_namespace **esw_egress_root_ns;
+       struct mlx5_flow_root_namespace **esw_ingress_root_ns;
        struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
        struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
 };
index 1a0e797..21d29f7 100644 (file)
@@ -241,7 +241,7 @@ static void print_health_info(struct mlx5_core_dev *dev)
        u32 fw;
        int i;
 
-       /* If the syndrom is 0, the device is OK and no need to print buffer */
+       /* If the syndrome is 0, the device is OK and no need to print buffer */
        if (!ioread8(&h->synd))
                return;
 
index d2a66dc..8812d72 100644 (file)
@@ -57,7 +57,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params)
 {
        /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
-       mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+       mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
 
        /* RQ size in ipoib by default is 512 */
        params->log_rq_size = is_kdump_kernel() ?
index f26f97f..582b2f1 100644 (file)
@@ -137,6 +137,17 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
 }
 EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
 
+static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
+                                      bool reset, void *out, int out_size)
+{
+       u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
+
+       MLX5_SET(query_cong_statistics_in, in, opcode,
+                MLX5_CMD_OP_QUERY_CONG_STATISTICS);
+       MLX5_SET(query_cong_statistics_in, in, clear, reset);
+       return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
+}
+
 static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
 {
        return dev->priv.lag;
@@ -633,3 +644,48 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
        /* If bonded, we do not add an IB device for PF1. */
        return false;
 }
+
+int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+                                u64 *values,
+                                int num_counters,
+                                size_t *offsets)
+{
+       int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
+       struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
+       struct mlx5_lag *ldev;
+       int num_ports;
+       int ret, i, j;
+       void *out;
+
+       out = kvzalloc(outlen, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       memset(values, 0, sizeof(*values) * num_counters);
+
+       mutex_lock(&lag_mutex);
+       ldev = mlx5_lag_dev_get(dev);
+       if (ldev && mlx5_lag_is_bonded(ldev)) {
+               num_ports = MLX5_MAX_PORTS;
+               mdev[0] = ldev->pf[0].dev;
+               mdev[1] = ldev->pf[1].dev;
+       } else {
+               num_ports = 1;
+               mdev[0] = dev;
+       }
+
+       for (i = 0; i < num_ports; ++i) {
+               ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
+               if (ret)
+                       goto unlock;
+
+               for (j = 0; j < num_counters; ++j)
+                       values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
+       }
+
+unlock:
+       mutex_unlock(&lag_mutex);
+       kvfree(out);
+       return ret;
+}
+EXPORT_SYMBOL(mlx5_lag_query_cong_counters);
index 5f32344..8a89c7e 100644 (file)
@@ -317,9 +317,6 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
 {
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = &priv->eq_table;
-       struct irq_affinity irqdesc = {
-               .pre_vectors = MLX5_EQ_VEC_COMP_BASE,
-       };
        int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
        int nvec;
 
@@ -333,10 +330,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
        if (!priv->irq_info)
                goto err_free_msix;
 
-       nvec = pci_alloc_irq_vectors_affinity(dev->pdev,
+       nvec = pci_alloc_irq_vectors(dev->pdev,
                        MLX5_EQ_VEC_COMP_BASE + 1, nvec,
-                       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
-                       &irqdesc);
+                       PCI_IRQ_MSIX);
        if (nvec < 0)
                return nvec;
 
@@ -622,6 +618,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
        return (u64)timer_l | (u64)timer_h1 << 32;
 }
 
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+       if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+               mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+               return -ENOMEM;
+       }
+
+       cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
+                       priv->irq_info[i].mask);
+
+       if (IS_ENABLED(CONFIG_SMP) &&
+           irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+               mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+
+       return 0;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+       irq_set_affinity_hint(irq, NULL);
+       free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+               err = mlx5_irq_set_affinity_hint(mdev, i);
+               if (err)
+                       goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       for (i--; i >= 0; i--)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+
+       return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn)
 {
@@ -1097,6 +1150,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                goto err_stop_eqs;
        }
 
+       err = mlx5_irq_set_affinity_hints(dev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+               goto err_affinity_hints;
+       }
+
        err = mlx5_init_fs(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1154,6 +1213,9 @@ err_sriov:
        mlx5_cleanup_fs(dev);
 
 err_fs:
+       mlx5_irq_clear_affinity_hints(dev);
+
+err_affinity_hints:
        free_comp_eqs(dev);
 
 err_stop_eqs:
@@ -1222,6 +1284,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 
        mlx5_sriov_detach(dev);
        mlx5_cleanup_fs(dev);
+       mlx5_irq_clear_affinity_hints(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_put_uars_page(dev, priv->uar);
index db9e665..889130e 100644 (file)
@@ -213,8 +213,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 err_cmd:
        memset(din, 0, sizeof(din));
        memset(dout, 0, sizeof(dout));
-       MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
-       MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+       MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+       MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
        mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
        return err;
 }
index e651e4c..d3c33e9 100644 (file)
@@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
        return ret_entry;
 }
 
-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
                                   u32 rate, u16 index)
 {
-       u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]   = {0};
-       u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)]   = {0};
+       u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
 
-       MLX5_SET(set_rate_limit_in, in, opcode,
-                MLX5_CMD_OP_SET_RATE_LIMIT);
-       MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
-       MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+       MLX5_SET(set_pp_rate_limit_in, in, opcode,
+                MLX5_CMD_OP_SET_PP_RATE_LIMIT);
+       MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
+       MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
@@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
                entry->refcount++;
        } else {
                /* new rate limit */
-               err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+               err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
                if (err) {
                        mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
                                      rate, err);
@@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
        entry->refcount--;
        if (!entry->refcount) {
                /* need to remove rate */
-               mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+               mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
                entry->rate = 0;
        }
 
@@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
        /* Clear all configured rates */
        for (i = 0; i < table->max_size; i++)
                if (table->rl_entry[i].rate)
-                       mlx5_set_rate_limit_cmd(dev, 0,
-                                               table->rl_entry[i].index);
+                       mlx5_set_pp_rate_limit_cmd(dev, 0,
+                                                  table->rl_entry[i].index);
 
        kfree(dev->priv.rl_table.rl_entry);
 }
index 07a9ba6..2f74953 100644 (file)
@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
        struct mlx5e_vxlan *vxlan;
 
-       spin_lock(&vxlan_db->lock);
+       spin_lock_bh(&vxlan_db->lock);
        vxlan = radix_tree_lookup(&vxlan_db->tree, port);
-       spin_unlock(&vxlan_db->lock);
+       spin_unlock_bh(&vxlan_db->lock);
 
        return vxlan;
 }
@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
        struct mlx5e_vxlan *vxlan;
        int err;
 
-       if (mlx5e_vxlan_lookup_port(priv, port))
+       mutex_lock(&priv->state_lock);
+       vxlan = mlx5e_vxlan_lookup_port(priv, port);
+       if (vxlan) {
+               atomic_inc(&vxlan->refcount);
                goto free_work;
+       }
 
        if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
                goto free_work;
@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
                goto err_delete_port;
 
        vxlan->udp_port = port;
+       atomic_set(&vxlan->refcount, 1);
 
-       spin_lock_irq(&vxlan_db->lock);
+       spin_lock_bh(&vxlan_db->lock);
        err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
-       spin_unlock_irq(&vxlan_db->lock);
+       spin_unlock_bh(&vxlan_db->lock);
        if (err)
                goto err_free;
 
@@ -113,35 +118,39 @@ err_free:
 err_delete_port:
        mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
 free_work:
+       mutex_unlock(&priv->state_lock);
        kfree(vxlan_work);
 }
 
-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv         = vxlan_work->priv;
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+       u16 port = vxlan_work->port;
        struct mlx5e_vxlan *vxlan;
+       bool remove = false;
 
-       spin_lock_irq(&vxlan_db->lock);
-       vxlan = radix_tree_delete(&vxlan_db->tree, port);
-       spin_unlock_irq(&vxlan_db->lock);
-
+       mutex_lock(&priv->state_lock);
+       spin_lock_bh(&vxlan_db->lock);
+       vxlan = radix_tree_lookup(&vxlan_db->tree, port);
        if (!vxlan)
-               return;
-
-       mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
-
-       kfree(vxlan);
-}
+               goto out_unlock;
 
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
-       struct mlx5e_vxlan_work *vxlan_work =
-               container_of(work, struct mlx5e_vxlan_work, work);
-       struct mlx5e_priv *priv = vxlan_work->priv;
-       u16 port = vxlan_work->port;
+       if (atomic_dec_and_test(&vxlan->refcount)) {
+               radix_tree_delete(&vxlan_db->tree, port);
+               remove = true;
+       }
 
-       __mlx5e_vxlan_core_del_port(priv, port);
+out_unlock:
+       spin_unlock_bh(&vxlan_db->lock);
 
+       if (remove) {
+               mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+               kfree(vxlan);
+       }
+       mutex_unlock(&priv->state_lock);
        kfree(vxlan_work);
 }
 
@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
        struct mlx5e_vxlan *vxlan;
        unsigned int port = 0;
 
-       spin_lock_irq(&vxlan_db->lock);
+       /* Lockless since we are the only radix-tree consumers, wq is disabled */
        while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
                port = vxlan->udp_port;
-               spin_unlock_irq(&vxlan_db->lock);
-               __mlx5e_vxlan_core_del_port(priv, (u16)port);
-               spin_lock_irq(&vxlan_db->lock);
+               radix_tree_delete(&vxlan_db->tree, port);
+               mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+               kfree(vxlan);
        }
-       spin_unlock_irq(&vxlan_db->lock);
 }
index 5def12c..5ef6ae7 100644 (file)
@@ -36,6 +36,7 @@
 #include "en.h"
 
 struct mlx5e_vxlan {
+       atomic_t refcount;
        u16 udp_port;
 };
 
index 72ef4f8..be657b8 100644 (file)
@@ -2436,25 +2436,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
        rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
 }
 
-static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
-                                   const struct mlxsw_sp_rif *rif)
-{
-       char rauht_pl[MLXSW_REG_RAUHT_LEN];
-
-       mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
-                            rif->rif_index, rif->addr);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
-}
-
 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_rif *rif)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
 
-       mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
        list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
-                                rif_list_node)
+                                rif_list_node) {
+               mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
                mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+       }
 }
 
 enum mlxsw_sp_nexthop_type {
index 4f6553f..4b63167 100644 (file)
@@ -84,11 +84,41 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
        return nfp_net_ebpf_capable(nn) ? "BPF" : "";
 }
 
+static int
+nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
+{
+       int err;
+
+       nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
+       if (!nn->app_priv)
+               return -ENOMEM;
+
+       err = nfp_app_nic_vnic_alloc(app, nn, id);
+       if (err)
+               goto err_free_priv;
+
+       return 0;
+err_free_priv:
+       kfree(nn->app_priv);
+       return err;
+}
+
+static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
+{
+       struct nfp_bpf_vnic *bv = nn->app_priv;
+
+       WARN_ON(bv->tc_prog);
+       kfree(bv);
+}
+
 static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
                                     void *type_data, void *cb_priv)
 {
        struct tc_cls_bpf_offload *cls_bpf = type_data;
        struct nfp_net *nn = cb_priv;
+       struct bpf_prog *oldprog;
+       struct nfp_bpf_vnic *bv;
+       int err;
 
        if (type != TC_SETUP_CLSBPF ||
            !tc_can_offload(nn->dp.netdev) ||
@@ -96,8 +126,6 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
            cls_bpf->common.protocol != htons(ETH_P_ALL) ||
            cls_bpf->common.chain_index)
                return -EOPNOTSUPP;
-       if (nn->dp.bpf_offload_xdp)
-               return -EBUSY;
 
        /* Only support TC direct action */
        if (!cls_bpf->exts_integrated ||
@@ -106,16 +134,25 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
                return -EOPNOTSUPP;
        }
 
-       switch (cls_bpf->command) {
-       case TC_CLSBPF_REPLACE:
-               return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
-       case TC_CLSBPF_ADD:
-               return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
-       case TC_CLSBPF_DESTROY:
-               return nfp_net_bpf_offload(nn, NULL, true);
-       default:
+       if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
                return -EOPNOTSUPP;
+
+       bv = nn->app_priv;
+       oldprog = cls_bpf->oldprog;
+
+       /* Don't remove if oldprog doesn't match driver's state */
+       if (bv->tc_prog != oldprog) {
+               oldprog = NULL;
+               if (!cls_bpf->prog)
+                       return 0;
        }
+
+       err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
+       if (err)
+               return err;
+
+       bv->tc_prog = cls_bpf->prog;
+       return 0;
 }
 
 static int nfp_bpf_setup_tc_block(struct net_device *netdev,
@@ -276,7 +313,8 @@ const struct nfp_app_type app_bpf = {
 
        .extra_cap      = nfp_bpf_extra_cap,
 
-       .vnic_alloc     = nfp_app_nic_vnic_alloc,
+       .vnic_alloc     = nfp_bpf_vnic_alloc,
+       .vnic_free      = nfp_bpf_vnic_free,
 
        .setup_tc       = nfp_bpf_setup_tc,
        .tc_busy        = nfp_bpf_tc_busy,
index f49669b..89a9b63 100644 (file)
@@ -228,9 +228,17 @@ struct nfp_prog {
        struct list_head insns;
 };
 
+/**
+ * struct nfp_bpf_vnic - per-vNIC BPF priv structure
+ * @tc_prog:   currently loaded cls_bpf program
+ */
+struct nfp_bpf_vnic {
+       struct bpf_prog *tc_prog;
+};
+
 int nfp_bpf_jit(struct nfp_prog *prog);
 
-extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
 
 struct netdev_bpf;
 struct nfp_app;
index 9c26084..d8870c2 100644 (file)
@@ -260,6 +260,6 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
        return 0;
 }
 
-const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
        .insn_hook = nfp_verify_insn,
 };
index e98bb9c..615314d 100644 (file)
@@ -125,6 +125,27 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
        return 0;
 }
 
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
+{
+       struct nfp_flower_cmsg_portreify *msg;
+       struct sk_buff *skb;
+
+       skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
+                                   NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
+                                   GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       msg = nfp_flower_cmsg_get_data(skb);
+       msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
+       msg->reserved = 0;
+       msg->info = cpu_to_be16(exists);
+
+       nfp_ctrl_tx(repr->app->ctrl, skb);
+
+       return 0;
+}
+
 static void
 nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
 {
@@ -161,6 +182,28 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
 }
 
 static void
+nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       struct nfp_flower_cmsg_portreify *msg;
+       bool exists;
+
+       msg = nfp_flower_cmsg_get_data(skb);
+
+       rcu_read_lock();
+       exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+       rcu_read_unlock();
+       if (!exists) {
+               nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
+                                    be32_to_cpu(msg->portnum));
+               return;
+       }
+
+       atomic_inc(&priv->reify_replies);
+       wake_up_interruptible(&priv->reify_wait_queue);
+}
+
+static void
 nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 {
        struct nfp_flower_cmsg_hdr *cmsg_hdr;
@@ -176,6 +219,9 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 
        type = cmsg_hdr->type;
        switch (type) {
+       case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
+               nfp_flower_cmsg_portreify_rx(app, skb);
+               break;
        case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
                nfp_flower_cmsg_portmod_rx(app, skb);
                break;
index 992d2ee..adfe474 100644 (file)
@@ -350,6 +350,7 @@ struct nfp_flower_cmsg_hdr {
 enum nfp_flower_cmsg_type_port {
        NFP_FLOWER_CMSG_TYPE_FLOW_ADD =         0,
        NFP_FLOWER_CMSG_TYPE_FLOW_DEL =         2,
+       NFP_FLOWER_CMSG_TYPE_PORT_REIFY =       6,
        NFP_FLOWER_CMSG_TYPE_MAC_REPR =         7,
        NFP_FLOWER_CMSG_TYPE_PORT_MOD =         8,
        NFP_FLOWER_CMSG_TYPE_NO_NEIGH =         10,
@@ -386,6 +387,15 @@ struct nfp_flower_cmsg_portmod {
 
 #define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK      BIT(0)
 
+/* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */
+struct nfp_flower_cmsg_portreify {
+       __be32 portnum;
+       u16 reserved;
+       __be16 info;
+};
+
+#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST   BIT(0)
+
 enum nfp_flower_cmsg_port_type {
        NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC =      0x0,
        NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT =   0x1,
@@ -444,6 +454,7 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
                             unsigned int nbi, unsigned int nbi_port,
                             unsigned int phys_port);
 int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists);
 void nfp_flower_cmsg_process_rx(struct work_struct *work);
 void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
 struct sk_buff *
index 63160e9..67c4068 100644 (file)
@@ -32,6 +32,7 @@
  */
 
 #include <linux/etherdevice.h>
+#include <linux/lockdep.h>
 #include <linux/pci.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
@@ -102,6 +103,52 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
 }
 
 static int
+nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
+                      bool exists)
+{
+       struct nfp_reprs *reprs;
+       int i, err, count = 0;
+
+       reprs = rcu_dereference_protected(app->reprs[type],
+                                         lockdep_is_held(&app->pf->lock));
+       if (!reprs)
+               return 0;
+
+       for (i = 0; i < reprs->num_reprs; i++)
+               if (reprs->reprs[i]) {
+                       struct nfp_repr *repr = netdev_priv(reprs->reprs[i]);
+
+                       err = nfp_flower_cmsg_portreify(repr, exists);
+                       if (err)
+                               return err;
+                       count++;
+               }
+
+       return count;
+}
+
+static int
+nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
+{
+       struct nfp_flower_priv *priv = app->priv;
+       int err;
+
+       if (!tot_repl)
+               return 0;
+
+       lockdep_assert_held(&app->pf->lock);
+       err = wait_event_interruptible_timeout(priv->reify_wait_queue,
+                                              atomic_read(replies) >= tot_repl,
+                                              msecs_to_jiffies(10));
+       if (err <= 0) {
+               nfp_warn(app->cpp, "Not all reprs responded to reify\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int
 nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
 {
        int err;
@@ -110,7 +157,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
        if (err)
                return err;
 
-       netif_carrier_on(repr->netdev);
        netif_tx_wake_all_queues(repr->netdev);
 
        return 0;
@@ -119,7 +165,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
 static int
 nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
 {
-       netif_carrier_off(repr->netdev);
        netif_tx_disable(repr->netdev);
 
        return nfp_flower_cmsg_portmod(repr, false);
@@ -140,6 +185,24 @@ nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
                                     netdev_priv(netdev));
 }
 
+static void
+nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+       struct nfp_repr *repr = netdev_priv(netdev);
+       struct nfp_flower_priv *priv = app->priv;
+       atomic_t *replies = &priv->reify_replies;
+       int err;
+
+       atomic_set(replies, 0);
+       err = nfp_flower_cmsg_portreify(repr, false);
+       if (err) {
+               nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
+               return;
+       }
+
+       nfp_flower_wait_repr_reify(app, replies, 1);
+}
+
 static void nfp_flower_sriov_disable(struct nfp_app *app)
 {
        struct nfp_flower_priv *priv = app->priv;
@@ -157,10 +220,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 {
        u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
        struct nfp_flower_priv *priv = app->priv;
+       atomic_t *replies = &priv->reify_replies;
        enum nfp_port_type port_type;
        struct nfp_reprs *reprs;
+       int i, err, reify_cnt;
        const u8 queue = 0;
-       int i, err;
 
        port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
                                                    NFP_PORT_VF_PORT;
@@ -211,7 +275,21 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 
        nfp_app_reprs_set(app, repr_type, reprs);
 
+       atomic_set(replies, 0);
+       reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
+       if (reify_cnt < 0) {
+               err = reify_cnt;
+               nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+               goto err_reprs_remove;
+       }
+
+       err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+       if (err)
+               goto err_reprs_remove;
+
        return 0;
+err_reprs_remove:
+       reprs = nfp_app_reprs_set(app, repr_type, NULL);
 err_reprs_clean:
        nfp_reprs_clean_and_free(reprs);
        return err;
@@ -233,10 +311,11 @@ static int
 nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 {
        struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
+       atomic_t *replies = &priv->reify_replies;
        struct sk_buff *ctrl_skb;
        struct nfp_reprs *reprs;
+       int err, reify_cnt;
        unsigned int i;
-       int err;
 
        ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
        if (!ctrl_skb)
@@ -293,16 +372,30 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 
        nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
 
-       /* The MAC_REPR control message should be sent after the MAC
+       /* The REIFY/MAC_REPR control messages should be sent after the MAC
         * representors are registered using nfp_app_reprs_set().  This is
         * because the firmware may respond with control messages for the
         * MAC representors, f.e. to provide the driver with information
         * about their state, and without registration the driver will drop
         * any such messages.
         */
+       atomic_set(replies, 0);
+       reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
+       if (reify_cnt < 0) {
+               err = reify_cnt;
+               nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+               goto err_reprs_remove;
+       }
+
+       err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+       if (err)
+               goto err_reprs_remove;
+
        nfp_ctrl_tx(app->ctrl, ctrl_skb);
 
        return 0;
+err_reprs_remove:
+       reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
 err_reprs_clean:
        nfp_reprs_clean_and_free(reprs);
 err_free_ctrl_skb:
@@ -419,6 +512,7 @@ static int nfp_flower_init(struct nfp_app *app)
        app_priv->app = app;
        skb_queue_head_init(&app_priv->cmsg_skbs);
        INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
+       init_waitqueue_head(&app_priv->reify_wait_queue);
 
        err = nfp_flower_metadata_init(app);
        if (err)
@@ -476,6 +570,7 @@ const struct nfp_app_type app_flower = {
        .vnic_clean     = nfp_flower_vnic_clean,
 
        .repr_init      = nfp_flower_repr_netdev_init,
+       .repr_preclean  = nfp_flower_repr_netdev_preclean,
        .repr_clean     = nfp_flower_repr_netdev_clean,
 
        .repr_open      = nfp_flower_repr_netdev_open,
index 6e3937a..332ff0f 100644 (file)
@@ -102,6 +102,9 @@ struct nfp_fl_stats_id {
  * @nfp_mac_off_count: Number of MACs in address list
  * @nfp_tun_mac_nb:    Notifier to monitor link state
  * @nfp_tun_neigh_nb:  Notifier to monitor neighbour state
+ * @reify_replies:     atomically stores the number of replies received
+ *                     from firmware for repr reify
+ * @reify_wait_queue:  wait queue for repr reify response counting
  */
 struct nfp_flower_priv {
        struct nfp_app *app;
@@ -127,6 +130,8 @@ struct nfp_flower_priv {
        int nfp_mac_off_count;
        struct notifier_block nfp_tun_mac_nb;
        struct notifier_block nfp_tun_neigh_nb;
+       atomic_t reify_replies;
+       wait_queue_head_t reify_wait_queue;
 };
 
 struct nfp_fl_key_ls {
index 0e5e030..3af1943 100644 (file)
@@ -77,6 +77,8 @@ extern const struct nfp_app_type app_flower;
  * @vnic_init: vNIC netdev was registered
  * @vnic_clean:        vNIC netdev about to be unregistered
  * @repr_init: representor about to be registered
+ * @repr_preclean:     representor about to unregistered, executed before app
+ *                     reference to the it is removed
  * @repr_clean:        representor about to be unregistered
  * @repr_open: representor netdev open callback
  * @repr_stop: representor netdev stop callback
@@ -112,6 +114,7 @@ struct nfp_app_type {
        void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
 
        int (*repr_init)(struct nfp_app *app, struct net_device *netdev);
+       void (*repr_preclean)(struct nfp_app *app, struct net_device *netdev);
        void (*repr_clean)(struct nfp_app *app, struct net_device *netdev);
 
        int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
@@ -226,6 +229,13 @@ nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev)
 }
 
 static inline void
+nfp_app_repr_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+       if (app->type->repr_preclean)
+               app->type->repr_preclean(app, netdev);
+}
+
+static inline void
 nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
 {
        if (app->type->repr_clean)
index 3801c52..0e564cf 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/io-64-nonatomic-hi-lo.h>
+#include <net/xdp.h>
 
 #include "nfp_net_ctrl.h"
 
@@ -350,6 +351,7 @@ struct nfp_net_rx_buf {
  * @rxds:       Virtual address of FL/RX ring in host memory
  * @dma:        DMA address of the FL/RX ring
  * @size:       Size, in bytes, of the FL/RX ring (needed to free)
+ * @xdp_rxq:    RX-ring info avail for XDP
  */
 struct nfp_net_rx_ring {
        struct nfp_net_r_vector *r_vec;
@@ -361,13 +363,14 @@ struct nfp_net_rx_ring {
        u32 idx;
 
        int fl_qcidx;
+       unsigned int size;
        u8 __iomem *qcp_fl;
 
        struct nfp_net_rx_buf *rxbufs;
        struct nfp_net_rx_desc *rxds;
 
        dma_addr_t dma;
-       unsigned int size;
+       struct xdp_rxq_info xdp_rxq;
 } ____cacheline_aligned;
 
 /**
index 0add487..05e071b 100644 (file)
@@ -1608,11 +1608,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
        unsigned int true_bufsz;
        struct sk_buff *skb;
        int pkts_polled = 0;
+       struct xdp_buff xdp;
        int idx;
 
        rcu_read_lock();
        xdp_prog = READ_ONCE(dp->xdp_prog);
        true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+       xdp.rxq = &rx_ring->xdp_rxq;
        tx_ring = r_vec->xdp_ring;
 
        while (pkts_polled < budget) {
@@ -1703,7 +1705,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                                  dp->bpf_offload_xdp) && !meta.portid) {
                        void *orig_data = rxbuf->frag + pkt_off;
                        unsigned int dma_off;
-                       struct xdp_buff xdp;
                        int act;
 
                        xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
@@ -2252,6 +2253,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
        struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
 
+       xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
        kfree(rx_ring->rxbufs);
 
        if (rx_ring->rxds)
@@ -2275,7 +2277,11 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
 static int
 nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
 {
-       int sz;
+       int sz, err;
+
+       err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, rx_ring->idx);
+       if (err < 0)
+               return err;
 
        rx_ring->cnt = dp->rxd_cnt;
        rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
@@ -2850,6 +2856,11 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
 
        new_ctrl = nn->dp.ctrl;
 
+       if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
+               new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
+       else
+               new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
+
        if (netdev->flags & IFF_PROMISC) {
                if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
                        new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
@@ -3787,8 +3798,6 @@ int nfp_net_init(struct nfp_net *nn)
        /* Allow L2 Broadcast and Multicast through by default, if supported */
        if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
-       if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
-               nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
        /* Allow IRQ moderation, if supported */
        if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
index 78b36c6..f50aa11 100644 (file)
@@ -336,6 +336,8 @@ struct net_device *nfp_repr_alloc(struct nfp_app *app)
        if (!netdev)
                return NULL;
 
+       netif_carrier_off(netdev);
+
        repr = netdev_priv(netdev);
        repr->netdev = netdev;
        repr->app = app;
@@ -375,11 +377,22 @@ nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
                                 enum nfp_repr_type type)
 {
        struct nfp_reprs *reprs;
+       int i;
 
-       reprs = nfp_app_reprs_set(app, type, NULL);
+       reprs = rcu_dereference_protected(app->reprs[type],
+                                         lockdep_is_held(&app->pf->lock));
        if (!reprs)
                return;
 
+       /* Preclean must happen before we remove the reprs reference from the
+        * app below.
+        */
+       for (i = 0; i < reprs->num_reprs; i++)
+               if (reprs->reprs[i])
+                       nfp_app_repr_preclean(app, reprs->reprs[i]);
+
+       reprs = nfp_app_reprs_set(app, type, NULL);
+
        synchronize_rcu();
        nfp_reprs_clean_and_free(reprs);
 }
@@ -418,8 +431,10 @@ int nfp_reprs_resync_phys_ports(struct nfp_app *app)
                        continue;
 
                repr = netdev_priv(old_reprs->reprs[i]);
-               if (repr->port->type == NFP_PORT_INVALID)
+               if (repr->port->type == NFP_PORT_INVALID) {
+                       nfp_app_repr_preclean(app, old_reprs->reprs[i]);
                        continue;
+               }
 
                reprs->reprs[i] = old_reprs->reprs[i];
        }
@@ -436,7 +451,6 @@ int nfp_reprs_resync_phys_ports(struct nfp_app *app)
                if (repr->port->type != NFP_PORT_INVALID)
                        continue;
 
-               nfp_app_repr_stop(app, repr);
                nfp_repr_clean(repr);
        }
 
index 49d6d78..a79b9f8 100644 (file)
@@ -1817,7 +1817,7 @@ static int nv_alloc_rx(struct net_device *dev)
 
        while (np->put_rx.orig != less_rx) {
                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
-               if (skb) {
+               if (likely(skb)) {
                        np->put_rx_ctx->skb = skb;
                        np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
                                                             skb->data,
@@ -1858,7 +1858,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 
        while (np->put_rx.ex != less_rx) {
                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
-               if (skb) {
+               if (likely(skb)) {
                        np->put_rx_ctx->skb = skb;
                        np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
                                                             skb->data,
index c9a55b7..07a2eb3 100644 (file)
@@ -212,9 +212,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
                return -ENOENT;
        }
 
-       if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-                  &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
-           != ETH_ALEN) {
+       if (!mac_pton(maddr, addr)) {
                dev_warn(&pdev->dev,
                         "can't parse mac address, not configuring\n");
                return -EINVAL;
index 26ddf09..0ee2490 100644 (file)
@@ -85,6 +85,7 @@ config QED
        tristate "QLogic QED 25/40/100Gb core driver"
        depends on PCI
        select ZLIB_INFLATE
+       select CRC8
        ---help---
          This enables the support for ...
 
index 91003bc..6948855 100644 (file)
 
 extern const struct qed_common_ops qed_common_ops_pass;
 
-#define QED_MAJOR_VERSION               8
-#define QED_MINOR_VERSION               10
-#define QED_REVISION_VERSION            11
-#define QED_ENGINEERING_VERSION 21
+#define QED_MAJOR_VERSION              8
+#define QED_MINOR_VERSION              33
+#define QED_REVISION_VERSION           0
+#define QED_ENGINEERING_VERSION                20
 
 #define QED_VERSION                                             \
        ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
@@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
        return sw_fid;
 }
 
-#define PURE_LB_TC 8
-#define PKT_LB_TC 9
+#define PKT_LB_TC      9
+#define MAX_NUM_VOQS_E4        20
 
 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
index afd07ad..6f546e8 100644 (file)
 
 /* connection context union */
 union conn_context {
-       struct core_conn_context core_ctx;
-       struct eth_conn_context eth_ctx;
-       struct iscsi_conn_context iscsi_ctx;
-       struct fcoe_conn_context fcoe_ctx;
-       struct roce_conn_context roce_ctx;
+       struct e4_core_conn_context core_ctx;
+       struct e4_eth_conn_context eth_ctx;
+       struct e4_iscsi_conn_context iscsi_ctx;
+       struct e4_fcoe_conn_context fcoe_ctx;
+       struct e4_roce_conn_context roce_ctx;
 };
 
 /* TYPE-0 task context - iSCSI, FCOE */
 union type0_task_context {
-       struct iscsi_task_context iscsi_ctx;
-       struct fcoe_task_context fcoe_ctx;
+       struct e4_iscsi_task_context iscsi_ctx;
+       struct e4_fcoe_task_context fcoe_ctx;
 };
 
 /* TYPE-1 task context - ROCE */
 union type1_task_context {
-       struct rdma_task_context roce_ctx;
+       struct e4_rdma_task_context roce_ctx;
 };
 
 struct src_ent {
@@ -109,8 +109,8 @@ struct src_ent {
        u64 next;
 };
 
-#define CDUT_SEG_ALIGNMET 3    /* in 4k chunks */
-#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+#define CDUT_SEG_ALIGNMET              3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES     BIT(CDUT_SEG_ALIGNMET + 12)
 
 #define CONN_CXT_SIZE(p_hwfn) \
        ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
@@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
        p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
 
        qed_cxt_qm_iids(p_hwfn, &qm_iids);
-       total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+       total = qed_qm_pf_mem_size(qm_iids.cids,
                                   qm_iids.vf_cids, qm_iids.tids,
                                   p_hwfn->qm_info.num_pqs,
                                   p_hwfn->qm_info.num_vf_pqs);
@@ -1055,11 +1055,10 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
                u32 size;
 
                size = min_t(u32, sz_left, p_blk->real_size_in_page);
-               p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-                                           size, &p_phys, GFP_KERNEL);
+               p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+                                            &p_phys, GFP_KERNEL);
                if (!p_virt)
                        return -ENOMEM;
-               memset(p_virt, 0, size);
 
                ilt_shadow[line].p_phys = p_phys;
                ilt_shadow[line].p_virt = p_virt;
@@ -1496,20 +1495,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
        }
 }
 
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, bool is_pf_loading)
 {
-       struct qed_qm_pf_rt_init_params params;
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+       struct qed_qm_pf_rt_init_params params;
+       struct qed_mcp_link_state *p_link;
        struct qed_qm_iids iids;
 
        memset(&iids, 0, sizeof(iids));
        qed_cxt_qm_iids(p_hwfn, &iids);
 
+       p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
        memset(&params, 0, sizeof(params));
        params.port_id = p_hwfn->port_id;
        params.pf_id = p_hwfn->rel_pf_id;
        params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
-       params.is_first_pf = p_hwfn->first_on_engine;
+       params.is_pf_loading = is_pf_loading;
        params.num_pf_cids = iids.cids;
        params.num_vf_cids = iids.vf_cids;
        params.num_tids = iids.tids;
@@ -1520,6 +1523,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        params.num_vports = qm_info->num_vports;
        params.pf_wfq = qm_info->pf_wfq;
        params.pf_rl = qm_info->pf_rl;
+       params.link_speed = p_link->speed;
        params.pq_params = qm_info->qm_pq_params;
        params.vport_params = qm_info->qm_vport_params;
 
@@ -1883,7 +1887,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
 
 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       qed_qm_init_pf(p_hwfn, p_ptt);
+       qed_qm_init_pf(p_hwfn, p_ptt, true);
        qed_cm_init_pf(p_hwfn);
        qed_dq_init_pf(p_hwfn);
        qed_cdu_init_pf(p_hwfn);
@@ -2303,14 +2307,13 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
                goto out0;
        }
 
-       p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-                                   p_blk->real_size_in_page,
-                                   &p_phys, GFP_KERNEL);
+       p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                    p_blk->real_size_in_page, &p_phys,
+                                    GFP_KERNEL);
        if (!p_virt) {
                rc = -ENOMEM;
                goto out1;
        }
-       memset(p_virt, 0, p_blk->real_size_in_page);
 
        /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
         * to compensate for a HW bug, but it is configured even if DIF is not
@@ -2326,7 +2329,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
                for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
                        elem = (union type1_task_context *)elem_start;
                        SET_FIELD(elem->roce_ctx.tdif_context.flags1,
-                                 TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+                                 TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
                        elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
                }
        }
index 1783634..a4e9586 100644 (file)
@@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  *
  * @param p_hwfn
  * @param p_ptt
+ * @param is_pf_loading
  */
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, bool is_pf_loading);
 
 /**
  * @brief Reconfigures QM pf on the fly
index fe7c1f2..449777f 100644 (file)
@@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
                                   struct pf_update_ramrod_data *p_dest)
 {
        struct protocol_dcb_data *p_dcb_data;
-       bool update_flag = false;
-
-       p_dest->pf_id = p_src->pf_id;
+       u8 update_flag;
 
        update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
        p_dest->update_fcoe_dcb_data_mode = update_flag;
index 03c3cf7..f2633ec 100644 (file)
@@ -21,25 +21,26 @@ enum mem_groups {
        MEM_GROUP_DMAE_MEM,
        MEM_GROUP_CM_MEM,
        MEM_GROUP_QM_MEM,
-       MEM_GROUP_TM_MEM,
+       MEM_GROUP_DORQ_MEM,
        MEM_GROUP_BRB_RAM,
        MEM_GROUP_BRB_MEM,
        MEM_GROUP_PRS_MEM,
-       MEM_GROUP_SDM_MEM,
        MEM_GROUP_IOR,
-       MEM_GROUP_RAM,
        MEM_GROUP_BTB_RAM,
-       MEM_GROUP_RDIF_CTX,
-       MEM_GROUP_TDIF_CTX,
-       MEM_GROUP_CFC_MEM,
        MEM_GROUP_CONN_CFC_MEM,
        MEM_GROUP_TASK_CFC_MEM,
        MEM_GROUP_CAU_PI,
        MEM_GROUP_CAU_MEM,
        MEM_GROUP_PXP_ILT,
+       MEM_GROUP_TM_MEM,
+       MEM_GROUP_SDM_MEM,
        MEM_GROUP_PBUF,
+       MEM_GROUP_RAM,
        MEM_GROUP_MULD_MEM,
        MEM_GROUP_BTB_MEM,
+       MEM_GROUP_RDIF_CTX,
+       MEM_GROUP_TDIF_CTX,
+       MEM_GROUP_CFC_MEM,
        MEM_GROUP_IGU_MEM,
        MEM_GROUP_IGU_MSIX,
        MEM_GROUP_CAU_SB,
@@ -54,25 +55,26 @@ static const char * const s_mem_group_names[] = {
        "DMAE_MEM",
        "CM_MEM",
        "QM_MEM",
-       "TM_MEM",
+       "DORQ_MEM",
        "BRB_RAM",
        "BRB_MEM",
        "PRS_MEM",
-       "SDM_MEM",
        "IOR",
-       "RAM",
        "BTB_RAM",
-       "RDIF_CTX",
-       "TDIF_CTX",
-       "CFC_MEM",
        "CONN_CFC_MEM",
        "TASK_CFC_MEM",
        "CAU_PI",
        "CAU_MEM",
        "PXP_ILT",
+       "TM_MEM",
+       "SDM_MEM",
        "PBUF",
+       "RAM",
        "MULD_MEM",
        "BTB_MEM",
+       "RDIF_CTX",
+       "TDIF_CTX",
+       "CFC_MEM",
        "IGU_MEM",
        "IGU_MSIX",
        "CAU_SB",
@@ -92,11 +94,6 @@ static u32 cond7(const u32 *r, const u32 *imm)
        return ((r[0] >> imm[0]) & imm[1]) != imm[2];
 }
 
-static u32 cond14(const u32 *r, const u32 *imm)
-{
-       return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
-}
-
 static u32 cond6(const u32 *r, const u32 *imm)
 {
        return (r[0] & imm[0]) != imm[1];
@@ -174,7 +171,6 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
        cond11,
        cond12,
        cond13,
-       cond14,
 };
 
 /******************************* Data Types **********************************/
@@ -203,6 +199,8 @@ struct chip_defs {
 struct platform_defs {
        const char *name;
        u32 delay_factor;
+       u32 dmae_thresh;
+       u32 log_thresh;
 };
 
 /* Storm constant definitions.
@@ -234,7 +232,7 @@ struct storm_defs {
 /* Block constant definitions */
 struct block_defs {
        const char *name;
-       bool has_dbg_bus[MAX_CHIP_IDS];
+       bool exists[MAX_CHIP_IDS];
        bool associated_to_storm;
 
        /* Valid only if associated_to_storm is true */
@@ -258,8 +256,8 @@ struct block_defs {
 /* Reset register definitions */
 struct reset_reg_defs {
        u32 addr;
-       u32 unreset_val;
        bool exists[MAX_CHIP_IDS];
+       u32 unreset_val[MAX_CHIP_IDS];
 };
 
 struct grc_param_defs {
@@ -276,8 +274,8 @@ struct rss_mem_defs {
        const char *mem_name;
        const char *type_name;
        u32 addr;
+       u32 entry_width;
        u32 num_entries[MAX_CHIP_IDS];
-       u32 entry_width[MAX_CHIP_IDS];
 };
 
 struct vfc_ram_defs {
@@ -294,7 +292,9 @@ struct big_ram_defs {
        enum dbg_grc_params grc_param;
        u32 addr_reg_addr;
        u32 data_reg_addr;
-       u32 num_of_blocks[MAX_CHIP_IDS];
+       u32 is_256b_reg_addr;
+       u32 is_256b_bit_offset[MAX_CHIP_IDS];
+       u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
 };
 
 struct phy_defs {
@@ -358,20 +358,14 @@ struct phy_defs {
                        (arr)[i] = qed_rd(dev, ptt, addr); \
        } while (0)
 
-#ifndef DWORDS_TO_BYTES
 #define DWORDS_TO_BYTES(dwords)                ((dwords) * BYTES_IN_DWORD)
-#endif
-#ifndef BYTES_TO_DWORDS
 #define BYTES_TO_DWORDS(bytes)         ((bytes) / BYTES_IN_DWORD)
-#endif
 
-/* extra lines include a signature line + optional latency events line */
-#ifndef NUM_DBG_LINES
+/* Extra lines include a signature line + optional latency events line */
 #define NUM_EXTRA_DBG_LINES(block_desc) \
        (1 + ((block_desc)->has_latency_events ? 1 : 0))
 #define NUM_DBG_LINES(block_desc) \
        ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
-#endif
 
 #define RAM_LINES_TO_DWORDS(lines)     ((lines) * 2)
 #define RAM_LINES_TO_BYTES(lines) \
@@ -424,9 +418,6 @@ struct phy_defs {
 #define NUM_RSS_MEM_TYPES              5
 
 #define NUM_BIG_RAM_TYPES              3
-#define BIG_RAM_BLOCK_SIZE_BYTES       128
-#define BIG_RAM_BLOCK_SIZE_DWORDS \
-       BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
 
 #define NUM_PHY_TBUS_ADDRESSES         2048
 #define PHY_DUMP_SIZE_DWORDS           (NUM_PHY_TBUS_ADDRESSES / 2)
@@ -441,23 +432,17 @@ struct phy_defs {
 
 #define FW_IMG_MAIN                    1
 
-#ifndef REG_FIFO_ELEMENT_DWORDS
 #define REG_FIFO_ELEMENT_DWORDS                2
-#endif
 #define REG_FIFO_DEPTH_ELEMENTS                32
 #define REG_FIFO_DEPTH_DWORDS \
        (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
 
-#ifndef IGU_FIFO_ELEMENT_DWORDS
 #define IGU_FIFO_ELEMENT_DWORDS                4
-#endif
 #define IGU_FIFO_DEPTH_ELEMENTS                64
 #define IGU_FIFO_DEPTH_DWORDS \
        (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
 
-#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS     2
-#endif
 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS     20
 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
        (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
@@ -491,6 +476,11 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
          {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
           {0, 0, 0},
           {0, 0, 0},
+          {0, 0, 0} } },
+       { "reserved",
+          {{0, 0, 0},
+          {0, 0, 0},
+          {0, 0, 0},
           {0, 0, 0} } }
 };
 
@@ -498,7 +488,8 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
 static struct storm_defs s_storm_defs[] = {
        /* Tstorm */
        {'T', BLOCK_TSEM,
-        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
+        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+         DBG_BUS_CLIENT_RBCT}, true,
         TSEM_REG_FAST_MEMORY,
         TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -511,7 +502,8 @@ static struct storm_defs s_storm_defs[] = {
 
        /* Mstorm */
        {'M', BLOCK_MSEM,
-        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
+        {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
+         DBG_BUS_CLIENT_RBCM}, false,
         MSEM_REG_FAST_MEMORY,
         MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -524,7 +516,8 @@ static struct storm_defs s_storm_defs[] = {
 
        /* Ustorm */
        {'U', BLOCK_USEM,
-        {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
+        {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+         DBG_BUS_CLIENT_RBCU}, false,
         USEM_REG_FAST_MEMORY,
         USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -537,7 +530,8 @@ static struct storm_defs s_storm_defs[] = {
 
        /* Xstorm */
        {'X', BLOCK_XSEM,
-        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
+        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+         DBG_BUS_CLIENT_RBCX}, false,
         XSEM_REG_FAST_MEMORY,
         XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -550,7 +544,8 @@ static struct storm_defs s_storm_defs[] = {
 
        /* Ystorm */
        {'Y', BLOCK_YSEM,
-        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
+        {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
+         DBG_BUS_CLIENT_RBCY}, false,
         YSEM_REG_FAST_MEMORY,
         YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -563,7 +558,8 @@ static struct storm_defs s_storm_defs[] = {
 
        /* Pstorm */
        {'P', BLOCK_PSEM,
-        {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
+        {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
+         DBG_BUS_CLIENT_RBCS}, true,
         PSEM_REG_FAST_MEMORY,
         PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
         PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -579,8 +575,8 @@ static struct storm_defs s_storm_defs[] = {
 
 static struct block_defs block_grc_defs = {
        "grc",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
        GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
        GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
        GRC_REG_DBG_FORCE_FRAME,
@@ -588,30 +584,30 @@ static struct block_defs block_grc_defs = {
 };
 
 static struct block_defs block_miscs_defs = {
-       "miscs", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "miscs", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_defs = {
-       "misc", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "misc", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbu_defs = {
-       "dbu", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "dbu", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_pglue_b_defs = {
        "pglue_b",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
        PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
        PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
        PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -620,25 +616,26 @@ static struct block_defs block_pglue_b_defs = {
 
 static struct block_defs block_cnig_defs = {
        "cnig",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-       CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
-       CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
-       CNIG_REG_DBG_FORCE_FRAME_K2,
+       {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
+        DBG_BUS_CLIENT_RBCW},
+       CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
+       CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
+       CNIG_REG_DBG_FORCE_FRAME_K2_E5,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 0
 };
 
 static struct block_defs block_cpmu_defs = {
-       "cpmu", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "cpmu", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 8
 };
 
 static struct block_defs block_ncsi_defs = {
        "ncsi",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
        NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
        NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
        NCSI_REG_DBG_FORCE_FRAME,
@@ -646,16 +643,16 @@ static struct block_defs block_ncsi_defs = {
 };
 
 static struct block_defs block_opte_defs = {
-       "opte", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "opte", {true, true, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 4
 };
 
 static struct block_defs block_bmb_defs = {
        "bmb",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
        BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
        BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
        BMB_REG_DBG_FORCE_FRAME,
@@ -664,27 +661,28 @@ static struct block_defs block_bmb_defs = {
 
 static struct block_defs block_pcie_defs = {
        "pcie",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-       PCIE_REG_DBG_COMMON_SELECT_K2,
-       PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
-       PCIE_REG_DBG_COMMON_SHIFT_K2,
-       PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
-       PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+       {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+        DBG_BUS_CLIENT_RBCH},
+       PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+       PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+       PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+       PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+       PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_mcp_defs = {
-       "mcp", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "mcp", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_mcp2_defs = {
        "mcp2",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
        MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
        MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
        MCP2_REG_DBG_FORCE_FRAME,
@@ -693,8 +691,8 @@ static struct block_defs block_mcp2_defs = {
 
 static struct block_defs block_pswhst_defs = {
        "pswhst",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
        PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
        PSWHST_REG_DBG_FORCE_FRAME,
@@ -703,8 +701,8 @@ static struct block_defs block_pswhst_defs = {
 
 static struct block_defs block_pswhst2_defs = {
        "pswhst2",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
        PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
        PSWHST2_REG_DBG_FORCE_FRAME,
@@ -713,8 +711,8 @@ static struct block_defs block_pswhst2_defs = {
 
 static struct block_defs block_pswrd_defs = {
        "pswrd",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
        PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
        PSWRD_REG_DBG_FORCE_FRAME,
@@ -723,8 +721,8 @@ static struct block_defs block_pswrd_defs = {
 
 static struct block_defs block_pswrd2_defs = {
        "pswrd2",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
        PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
        PSWRD2_REG_DBG_FORCE_FRAME,
@@ -733,8 +731,8 @@ static struct block_defs block_pswrd2_defs = {
 
 static struct block_defs block_pswwr_defs = {
        "pswwr",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
        PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
        PSWWR_REG_DBG_FORCE_FRAME,
@@ -742,16 +740,16 @@ static struct block_defs block_pswwr_defs = {
 };
 
 static struct block_defs block_pswwr2_defs = {
-       "pswwr2", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "pswwr2", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISC_PL_HV, 3
 };
 
 static struct block_defs block_pswrq_defs = {
        "pswrq",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
        PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
        PSWRQ_REG_DBG_FORCE_FRAME,
@@ -760,8 +758,8 @@ static struct block_defs block_pswrq_defs = {
 
 static struct block_defs block_pswrq2_defs = {
        "pswrq2",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
        PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
        PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -770,18 +768,19 @@ static struct block_defs block_pswrq2_defs = {
 
 static struct block_defs block_pglcs_defs = {
        "pglcs",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-       PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
-       PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
-       PGLCS_REG_DBG_FORCE_FRAME_K2,
+       {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+        DBG_BUS_CLIENT_RBCH},
+       PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
+       PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
+       PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 2
 };
 
 static struct block_defs block_ptu_defs = {
        "ptu",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
        PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
        PTU_REG_DBG_FORCE_FRAME,
@@ -790,8 +789,8 @@ static struct block_defs block_ptu_defs = {
 
 static struct block_defs block_dmae_defs = {
        "dmae",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
        DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
        DMAE_REG_DBG_FORCE_FRAME,
@@ -800,8 +799,8 @@ static struct block_defs block_dmae_defs = {
 
 static struct block_defs block_tcm_defs = {
        "tcm",
-       {true, true}, true, DBG_TSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       {true, true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
        TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
        TCM_REG_DBG_FORCE_FRAME,
@@ -810,8 +809,8 @@ static struct block_defs block_tcm_defs = {
 
 static struct block_defs block_mcm_defs = {
        "mcm",
-       {true, true}, true, DBG_MSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
        MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
        MCM_REG_DBG_FORCE_FRAME,
@@ -820,8 +819,8 @@ static struct block_defs block_mcm_defs = {
 
 static struct block_defs block_ucm_defs = {
        "ucm",
-       {true, true}, true, DBG_USTORM_ID,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       {true, true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
        UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
        UCM_REG_DBG_FORCE_FRAME,
@@ -830,8 +829,8 @@ static struct block_defs block_ucm_defs = {
 
 static struct block_defs block_xcm_defs = {
        "xcm",
-       {true, true}, true, DBG_XSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       {true, true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
        XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
        XCM_REG_DBG_FORCE_FRAME,
@@ -840,8 +839,8 @@ static struct block_defs block_xcm_defs = {
 
 static struct block_defs block_ycm_defs = {
        "ycm",
-       {true, true}, true, DBG_YSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       {true, true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
        YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
        YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
        YCM_REG_DBG_FORCE_FRAME,
@@ -850,8 +849,8 @@ static struct block_defs block_ycm_defs = {
 
 static struct block_defs block_pcm_defs = {
        "pcm",
-       {true, true}, true, DBG_PSTORM_ID,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       {true, true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
        PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
        PCM_REG_DBG_FORCE_FRAME,
@@ -860,8 +859,8 @@ static struct block_defs block_pcm_defs = {
 
 static struct block_defs block_qm_defs = {
        "qm",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
        QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
        QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
        QM_REG_DBG_FORCE_FRAME,
@@ -870,8 +869,8 @@ static struct block_defs block_qm_defs = {
 
 static struct block_defs block_tm_defs = {
        "tm",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
        TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
        TM_REG_DBG_FORCE_FRAME,
@@ -880,8 +879,8 @@ static struct block_defs block_tm_defs = {
 
 static struct block_defs block_dorq_defs = {
        "dorq",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
        DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
        DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
        DORQ_REG_DBG_FORCE_FRAME,
@@ -890,8 +889,8 @@ static struct block_defs block_dorq_defs = {
 
 static struct block_defs block_brb_defs = {
        "brb",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
        BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
        BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
        BRB_REG_DBG_FORCE_FRAME,
@@ -900,8 +899,8 @@ static struct block_defs block_brb_defs = {
 
 static struct block_defs block_src_defs = {
        "src",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
        SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
        SRC_REG_DBG_FORCE_FRAME,
@@ -910,8 +909,8 @@ static struct block_defs block_src_defs = {
 
 static struct block_defs block_prs_defs = {
        "prs",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
        PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
        PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
        PRS_REG_DBG_FORCE_FRAME,
@@ -920,8 +919,8 @@ static struct block_defs block_prs_defs = {
 
 static struct block_defs block_tsdm_defs = {
        "tsdm",
-       {true, true}, true, DBG_TSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       {true, true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
        TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
        TSDM_REG_DBG_FORCE_FRAME,
@@ -930,8 +929,8 @@ static struct block_defs block_tsdm_defs = {
 
 static struct block_defs block_msdm_defs = {
        "msdm",
-       {true, true}, true, DBG_MSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
        MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
        MSDM_REG_DBG_FORCE_FRAME,
@@ -940,8 +939,8 @@ static struct block_defs block_msdm_defs = {
 
 static struct block_defs block_usdm_defs = {
        "usdm",
-       {true, true}, true, DBG_USTORM_ID,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       {true, true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
        USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
        USDM_REG_DBG_FORCE_FRAME,
@@ -950,8 +949,8 @@ static struct block_defs block_usdm_defs = {
 
 static struct block_defs block_xsdm_defs = {
        "xsdm",
-       {true, true}, true, DBG_XSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       {true, true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
        XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
        XSDM_REG_DBG_FORCE_FRAME,
@@ -960,8 +959,8 @@ static struct block_defs block_xsdm_defs = {
 
 static struct block_defs block_ysdm_defs = {
        "ysdm",
-       {true, true}, true, DBG_YSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       {true, true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
        YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
        YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
        YSDM_REG_DBG_FORCE_FRAME,
@@ -970,8 +969,8 @@ static struct block_defs block_ysdm_defs = {
 
 static struct block_defs block_psdm_defs = {
        "psdm",
-       {true, true}, true, DBG_PSTORM_ID,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       {true, true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
        PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
        PSDM_REG_DBG_FORCE_FRAME,
@@ -980,8 +979,8 @@ static struct block_defs block_psdm_defs = {
 
 static struct block_defs block_tsem_defs = {
        "tsem",
-       {true, true}, true, DBG_TSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       {true, true, true}, true, DBG_TSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
        TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
        TSEM_REG_DBG_FORCE_FRAME,
@@ -990,8 +989,8 @@ static struct block_defs block_tsem_defs = {
 
 static struct block_defs block_msem_defs = {
        "msem",
-       {true, true}, true, DBG_MSTORM_ID,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, true, DBG_MSTORM_ID,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
        MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
        MSEM_REG_DBG_FORCE_FRAME,
@@ -1000,8 +999,8 @@ static struct block_defs block_msem_defs = {
 
 static struct block_defs block_usem_defs = {
        "usem",
-       {true, true}, true, DBG_USTORM_ID,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       {true, true, true}, true, DBG_USTORM_ID,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
        USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
        USEM_REG_DBG_FORCE_FRAME,
@@ -1010,8 +1009,8 @@ static struct block_defs block_usem_defs = {
 
 static struct block_defs block_xsem_defs = {
        "xsem",
-       {true, true}, true, DBG_XSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       {true, true, true}, true, DBG_XSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
        XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
        XSEM_REG_DBG_FORCE_FRAME,
@@ -1020,8 +1019,8 @@ static struct block_defs block_xsem_defs = {
 
 static struct block_defs block_ysem_defs = {
        "ysem",
-       {true, true}, true, DBG_YSTORM_ID,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+       {true, true, true}, true, DBG_YSTORM_ID,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
        YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
        YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
        YSEM_REG_DBG_FORCE_FRAME,
@@ -1030,8 +1029,8 @@ static struct block_defs block_ysem_defs = {
 
 static struct block_defs block_psem_defs = {
        "psem",
-       {true, true}, true, DBG_PSTORM_ID,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       {true, true, true}, true, DBG_PSTORM_ID,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
        PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
        PSEM_REG_DBG_FORCE_FRAME,
@@ -1040,8 +1039,8 @@ static struct block_defs block_psem_defs = {
 
 static struct block_defs block_rss_defs = {
        "rss",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
        RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
        RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
        RSS_REG_DBG_FORCE_FRAME,
@@ -1050,8 +1049,8 @@ static struct block_defs block_rss_defs = {
 
 static struct block_defs block_tmld_defs = {
        "tmld",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
        TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
        TMLD_REG_DBG_FORCE_FRAME,
@@ -1060,8 +1059,8 @@ static struct block_defs block_tmld_defs = {
 
 static struct block_defs block_muld_defs = {
        "muld",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
        MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
        MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
        MULD_REG_DBG_FORCE_FRAME,
@@ -1070,8 +1069,9 @@ static struct block_defs block_muld_defs = {
 
 static struct block_defs block_yuld_defs = {
        "yuld",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+       {true, true, false}, false, 0,
+       {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+        MAX_DBG_BUS_CLIENTS},
        YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
        YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
        YULD_REG_DBG_FORCE_FRAME_BB_K2,
@@ -1081,18 +1081,40 @@ static struct block_defs block_yuld_defs = {
 
 static struct block_defs block_xyld_defs = {
        "xyld",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
        XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
        XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
        XYLD_REG_DBG_FORCE_FRAME,
        true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
 };
 
+static struct block_defs block_ptld_defs = {
+       "ptld",
+       {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
+       PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
+       PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
+       PTLD_REG_DBG_FORCE_FRAME_E5,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+       28
+};
+
+static struct block_defs block_ypld_defs = {
+       "ypld",
+       {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
+       YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
+       YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
+       YPLD_REG_DBG_FORCE_FRAME_E5,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+       27
+};
+
 static struct block_defs block_prm_defs = {
        "prm",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
        PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
        PRM_REG_DBG_FORCE_FRAME,
@@ -1101,8 +1123,8 @@ static struct block_defs block_prm_defs = {
 
 static struct block_defs block_pbf_pb1_defs = {
        "pbf_pb1",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
        PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
        PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
        PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -1112,8 +1134,8 @@ static struct block_defs block_pbf_pb1_defs = {
 
 static struct block_defs block_pbf_pb2_defs = {
        "pbf_pb2",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
        PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
        PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
        PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -1123,8 +1145,8 @@ static struct block_defs block_pbf_pb2_defs = {
 
 static struct block_defs block_rpb_defs = {
        "rpb",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
        RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
        RPB_REG_DBG_FORCE_FRAME,
@@ -1133,8 +1155,8 @@ static struct block_defs block_rpb_defs = {
 
 static struct block_defs block_btb_defs = {
        "btb",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
        BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
        BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
        BTB_REG_DBG_FORCE_FRAME,
@@ -1143,8 +1165,8 @@ static struct block_defs block_btb_defs = {
 
 static struct block_defs block_pbf_defs = {
        "pbf",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
        PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
        PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
        PBF_REG_DBG_FORCE_FRAME,
@@ -1153,8 +1175,8 @@ static struct block_defs block_pbf_defs = {
 
 static struct block_defs block_rdif_defs = {
        "rdif",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
        RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
        RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
        RDIF_REG_DBG_FORCE_FRAME,
@@ -1163,8 +1185,8 @@ static struct block_defs block_rdif_defs = {
 
 static struct block_defs block_tdif_defs = {
        "tdif",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
        TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
        TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
        TDIF_REG_DBG_FORCE_FRAME,
@@ -1173,8 +1195,8 @@ static struct block_defs block_tdif_defs = {
 
 static struct block_defs block_cdu_defs = {
        "cdu",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
        CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
        CDU_REG_DBG_FORCE_FRAME,
@@ -1183,8 +1205,8 @@ static struct block_defs block_cdu_defs = {
 
 static struct block_defs block_ccfc_defs = {
        "ccfc",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
        CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
        CCFC_REG_DBG_FORCE_FRAME,
@@ -1193,8 +1215,8 @@ static struct block_defs block_ccfc_defs = {
 
 static struct block_defs block_tcfc_defs = {
        "tcfc",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
        TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
        TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
        TCFC_REG_DBG_FORCE_FRAME,
@@ -1203,8 +1225,8 @@ static struct block_defs block_tcfc_defs = {
 
 static struct block_defs block_igu_defs = {
        "igu",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
        IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
        IGU_REG_DBG_FORCE_FRAME,
@@ -1213,42 +1235,79 @@ static struct block_defs block_igu_defs = {
 
 static struct block_defs block_cau_defs = {
        "cau",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
        CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
        CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
        CAU_REG_DBG_FORCE_FRAME,
        true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
 };
 
+static struct block_defs block_rgfs_defs = {
+       "rgfs", {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
+};
+
+static struct block_defs block_rgsrc_defs = {
+       "rgsrc",
+       {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+       RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
+       RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
+       RGSRC_REG_DBG_FORCE_FRAME_E5,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+       30
+};
+
+static struct block_defs block_tgfs_defs = {
+       "tgfs", {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       0, 0, 0, 0, 0,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
+};
+
+static struct block_defs block_tgsrc_defs = {
+       "tgsrc",
+       {false, false, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
+       TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
+       TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
+       TGSRC_REG_DBG_FORCE_FRAME_E5,
+       true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+       31
+};
+
 static struct block_defs block_umac_defs = {
        "umac",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-       UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
-       UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
-       UMAC_REG_DBG_FORCE_FRAME_K2,
+       {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
+        DBG_BUS_CLIENT_RBCZ},
+       UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
+       UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
+       UMAC_REG_DBG_FORCE_FRAME_K2_E5,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 6
 };
 
 static struct block_defs block_xmac_defs = {
-       "xmac", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "xmac", {true, false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbg_defs = {
-       "dbg", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "dbg", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
 };
 
 static struct block_defs block_nig_defs = {
        "nig",
-       {true, true}, false, 0,
-       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+       {true, true, true}, false, 0,
+       {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
        NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
        NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
        NIG_REG_DBG_FORCE_FRAME,
@@ -1257,139 +1316,106 @@ static struct block_defs block_nig_defs = {
 
 static struct block_defs block_wol_defs = {
        "wol",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-       WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
-       WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
-       WOL_REG_DBG_FORCE_FRAME_K2,
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+       WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
+       WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
+       WOL_REG_DBG_FORCE_FRAME_K2_E5,
        true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
 };
 
 static struct block_defs block_bmbn_defs = {
        "bmbn",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
-       BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
-       BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
-       BMBN_REG_DBG_FORCE_FRAME_K2,
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
+        DBG_BUS_CLIENT_RBCB},
+       BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
+       BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
+       BMBN_REG_DBG_FORCE_FRAME_K2_E5,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_ipc_defs = {
-       "ipc", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "ipc", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_UA, 8
 };
 
 static struct block_defs block_nwm_defs = {
        "nwm",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-       NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
-       NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
-       NWM_REG_DBG_FORCE_FRAME_K2,
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+       NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
+       NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
+       NWM_REG_DBG_FORCE_FRAME_K2_E5,
        true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
 };
 
 static struct block_defs block_nws_defs = {
        "nws",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-       NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
-       NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
-       NWS_REG_DBG_FORCE_FRAME_K2,
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+       NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
+       NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
+       NWS_REG_DBG_FORCE_FRAME_K2_E5,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 12
 };
 
 static struct block_defs block_ms_defs = {
        "ms",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-       MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
-       MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
-       MS_REG_DBG_FORCE_FRAME_K2,
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+       MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
+       MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
+       MS_REG_DBG_FORCE_FRAME_K2_E5,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 13
 };
 
 static struct block_defs block_phy_pcie_defs = {
        "phy_pcie",
-       {false, true}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-       PCIE_REG_DBG_COMMON_SELECT_K2,
-       PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
-       PCIE_REG_DBG_COMMON_SHIFT_K2,
-       PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
-       PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+       {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+        DBG_BUS_CLIENT_RBCH},
+       PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+       PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+       PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+       PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+       PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_led_defs = {
-       "led", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "led", {false, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_HV, 14
 };
 
 static struct block_defs block_avs_wrap_defs = {
-       "avs_wrap", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "avs_wrap", {false, true, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        true, false, DBG_RESET_REG_MISCS_PL_UA, 11
 };
 
-static struct block_defs block_rgfs_defs = {
-       "rgfs", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-       0, 0, 0, 0, 0,
-       false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_rgsrc_defs = {
-       "rgsrc", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-       0, 0, 0, 0, 0,
-       false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgfs_defs = {
-       "tgfs", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-       0, 0, 0, 0, 0,
-       false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgsrc_defs = {
-       "tgsrc", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-       0, 0, 0, 0, 0,
-       false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ptld_defs = {
-       "ptld", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-       0, 0, 0, 0, 0,
-       false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ypld_defs = {
-       "ypld", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+static struct block_defs block_pxpreqbus_defs = {
+       "pxpreqbus", {false, false, false}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_aeu_defs = {
-       "misc_aeu", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "misc_aeu", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_bar0_map_defs = {
-       "bar0_map", {false, false}, false, 0,
-       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+       "bar0_map", {true, true, true}, false, 0,
+       {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
        0, 0, 0, 0, 0,
        false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -1480,164 +1506,160 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
        &block_phy_pcie_defs,
        &block_led_defs,
        &block_avs_wrap_defs,
+       &block_pxpreqbus_defs,
        &block_misc_aeu_defs,
        &block_bar0_map_defs,
 };
 
 static struct platform_defs s_platform_defs[] = {
-       {"asic", 1},
-       {"reserved", 0},
-       {"reserved2", 0},
-       {"reserved3", 0}
+       {"asic", 1, 256, 32768},
+       {"reserved", 0, 0, 0},
+       {"reserved2", 0, 0, 0},
+       {"reserved3", 0, 0, 0}
 };
 
 static struct grc_param_defs s_grc_param_defs[] = {
        /* DBG_GRC_PARAM_DUMP_TSTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_MSTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_USTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_XSTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_YSTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_PSTORM */
-       {{1, 1}, 0, 1, false, 1, 1},
+       {{1, 1, 1}, 0, 1, false, 1, 1},
 
        /* DBG_GRC_PARAM_DUMP_REGS */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_RAM */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_PBUF */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_IOR */
-       {{0, 0}, 0, 1, false, 0, 1},
+       {{0, 0, 0}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_VFC */
-       {{0, 0}, 0, 1, false, 0, 1},
+       {{0, 0, 0}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_CM_CTX */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_ILT */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_RSS */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_CAU */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_QM */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_MCP */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_RESERVED */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_CFC */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_IGU */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_BRB */
-       {{0, 0}, 0, 1, false, 0, 1},
+       {{0, 0, 0}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_BTB */
-       {{0, 0}, 0, 1, false, 0, 1},
+       {{0, 0, 0}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_BMB */
-       {{0, 0}, 0, 1, false, 0, 1},
+       {{0, 0, 0}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_NIG */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_MULD */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_PRS */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_DMAE */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_TM */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_SDM */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_DIF */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_STATIC */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_UNSTALL */
-       {{0, 0}, 0, 1, false, 0, 0},
+       {{0, 0, 0}, 0, 1, false, 0, 0},
 
        /* DBG_GRC_PARAM_NUM_LCIDS */
-       {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+       {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
         MAX_LCIDS},
 
        /* DBG_GRC_PARAM_NUM_LTIDS */
-       {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+       {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
         MAX_LTIDS},
 
        /* DBG_GRC_PARAM_EXCLUDE_ALL */
-       {{0, 0}, 0, 1, true, 0, 0},
+       {{0, 0, 0}, 0, 1, true, 0, 0},
 
        /* DBG_GRC_PARAM_CRASH */
-       {{0, 0}, 0, 1, true, 0, 0},
+       {{0, 0, 0}, 0, 1, true, 0, 0},
 
        /* DBG_GRC_PARAM_PARITY_SAFE */
-       {{0, 0}, 0, 1, false, 1, 0},
+       {{0, 0, 0}, 0, 1, false, 1, 0},
 
        /* DBG_GRC_PARAM_DUMP_CM */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_DUMP_PHY */
-       {{1, 1}, 0, 1, false, 0, 1},
+       {{1, 1, 1}, 0, 1, false, 0, 1},
 
        /* DBG_GRC_PARAM_NO_MCP */
-       {{0, 0}, 0, 1, false, 0, 0},
+       {{0, 0, 0}, 0, 1, false, 0, 0},
 
        /* DBG_GRC_PARAM_NO_FW_VER */
-       {{0, 0}, 0, 1, false, 0, 0}
+       {{0, 0, 0}, 0, 1, false, 0, 0}
 };
 
 static struct rss_mem_defs s_rss_mem_defs[] = {
-       { "rss_mem_cid", "rss_cid", 0,
-         {256, 320},
-         {32, 32} },
+       { "rss_mem_cid", "rss_cid", 0, 32,
+         {256, 320, 512} },
 
-       { "rss_mem_key_msb", "rss_key", 1024,
-         {128, 208},
-         {256, 256} },
+       { "rss_mem_key_msb", "rss_key", 1024, 256,
+         {128, 208, 257} },
 
-       { "rss_mem_key_lsb", "rss_key", 2048,
-         {128, 208},
-         {64, 64} },
+       { "rss_mem_key_lsb", "rss_key", 2048, 64,
+         {128, 208, 257} },
 
-       { "rss_mem_info", "rss_info", 3072,
-         {128, 208},
-         {16, 16} },
+       { "rss_mem_info", "rss_info", 3072, 16,
+         {128, 208, 256} },
 
-       { "rss_mem_ind", "rss_ind", 4096,
-         {16384, 26624},
-         {16, 16} }
+       { "rss_mem_ind", "rss_ind", 4096, 16,
+         {16384, 26624, 32768} }
 };
 
 static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1650,72 +1672,75 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
 static struct big_ram_defs s_big_ram_defs[] = {
        { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
          BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
-         {4800, 5632} },
+         MISC_REG_BLOCK_256B_EN, {0, 0, 0},
+         {153600, 180224, 282624} },
 
        { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
          BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
-         {2880, 3680} },
+         MISC_REG_BLOCK_256B_EN, {0, 1, 1},
+         {92160, 117760, 168960} },
 
        { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
          BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
-         {1152, 1152} }
+         MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
+         {36864, 36864, 36864} }
 };
 
 static struct reset_reg_defs s_reset_regs_defs[] = {
        /* DBG_RESET_REG_MISCS_PL_UA */
-       { MISCS_REG_RESET_PL_UA, 0x0,
-         {true, true} },
+       { MISCS_REG_RESET_PL_UA,
+         {true, true, true}, {0x0, 0x0, 0x0} },
 
        /* DBG_RESET_REG_MISCS_PL_HV */
-       { MISCS_REG_RESET_PL_HV, 0x0,
-         {true, true} },
+       { MISCS_REG_RESET_PL_HV,
+         {true, true, true}, {0x0, 0x400, 0x600} },
 
        /* DBG_RESET_REG_MISCS_PL_HV_2 */
-       { MISCS_REG_RESET_PL_HV_2_K2, 0x0,
-         {false, true} },
+       { MISCS_REG_RESET_PL_HV_2_K2_E5,
+         {false, true, true}, {0x0, 0x0, 0x0} },
 
        /* DBG_RESET_REG_MISC_PL_UA */
-       { MISC_REG_RESET_PL_UA, 0x0,
-         {true, true} },
+       { MISC_REG_RESET_PL_UA,
+         {true, true, true}, {0x0, 0x0, 0x0} },
 
        /* DBG_RESET_REG_MISC_PL_HV */
-       { MISC_REG_RESET_PL_HV, 0x0,
-         {true, true} },
+       { MISC_REG_RESET_PL_HV,
+         {true, true, true}, {0x0, 0x0, 0x0} },
 
        /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
-       { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
-         {true, true} },
+       { MISC_REG_RESET_PL_PDA_VMAIN_1,
+         {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
 
        /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
-       { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
-         {true, true} },
+       { MISC_REG_RESET_PL_PDA_VMAIN_2,
+         {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
 
        /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
-       { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
-         {true, true} },
+       { MISC_REG_RESET_PL_PDA_VAUX,
+         {true, true, true}, {0x2, 0x2, 0x2} },
 };
 
 static struct phy_defs s_phy_defs[] = {
        {"nw_phy", NWS_REG_NWS_CMU_K2,
-        PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
-        PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
-        PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
-        PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
-       {"sgmii_phy", MS_REG_MS_CMU_K2,
-        PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
-        PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
-        PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
-        PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
-       {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
-        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
-        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
-        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
-        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
-       {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
-        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
-        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
-        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
-        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+        PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
+        PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
+        PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
+        PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
+       {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
+        PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+        PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+        PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+        PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+       {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+       {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+        PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
 };
 
 /**************************** Private Functions ******************************/
@@ -1774,7 +1799,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
        /* Initializes the GRC parameters */
        qed_dbg_grc_init_params(p_hwfn);
 
-       dev_data->initialized = true;
+       dev_data->use_dmae = true;
+       dev_data->num_regs_read = 0;
+       dev_data->initialized = 1;
 
        return DBG_STATUS_OK;
 }
@@ -1807,7 +1834,7 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
         * The address is located in the last line of the Storm RAM.
         */
        addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
-              DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+              DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
               sizeof(fw_info_location);
        dest = (u32 *)&fw_info_location;
 
@@ -2071,8 +2098,7 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
 /* Writes the "last" section (including CRC) to the specified buffer at the
  * given offset. Returns the dumped size in dwords.
  */
-static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
-                                u32 *dump_buf, u32 offset, bool dump)
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
 {
        u32 start_offset = offset;
 
@@ -2235,7 +2261,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
        case MEM_GROUP_CFC_MEM:
        case MEM_GROUP_CONN_CFC_MEM:
        case MEM_GROUP_TASK_CFC_MEM:
-               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+               return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
+                      qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
        case MEM_GROUP_IGU_MEM:
        case MEM_GROUP_IGU_MSIX:
                return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
@@ -2293,7 +2320,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
        for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
                struct block_defs *block = s_block_defs[block_id];
 
-               if (block->has_reset_bit && block->unreset)
+               if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
+                   block->unreset)
                        reg_val[block->reset_reg] |=
                            BIT(block->reset_bit_offset);
        }
@@ -2303,7 +2331,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
                if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
                        continue;
 
-               reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+               reg_val[i] |=
+                       s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
 
                if (reg_val[i])
                        qed_wr(p_hwfn,
@@ -2413,6 +2442,18 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
        return offset;
 }
 
+/* Reads the specified registers into the specified buffer.
+ * The addr and len arguments are specified in dwords.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
+{
+       u32 i;
+
+       for (i = 0; i < len; i++)
+               buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
+}
+
 /* Dumps the GRC registers in the specified address range.
  * Returns the dumped size in dwords.
  * The addr and len arguments are specified in dwords.
@@ -2422,15 +2463,39 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
                                   u32 *dump_buf,
                                   bool dump, u32 addr, u32 len, bool wide_bus)
 {
-       u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+       struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 
        if (!dump)
                return len;
 
-       for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
-               *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+       /* Print log if needed */
+       dev_data->num_regs_read += len;
+       if (dev_data->num_regs_read >=
+           s_platform_defs[dev_data->platform_id].log_thresh) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_DEBUG,
+                          "Dumping %d registers...\n",
+                          dev_data->num_regs_read);
+               dev_data->num_regs_read = 0;
+       }
 
-       return offset;
+       /* Try reading using DMAE */
+       if (dev_data->use_dmae &&
+           (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
+            wide_bus)) {
+               if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
+                                      (u64)(uintptr_t)(dump_buf), len, 0))
+                       return len;
+               dev_data->use_dmae = 0;
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_DEBUG,
+                          "Failed reading from chip using DMAE, using GRC instead\n");
+       }
+
+       /* Read registers */
+       qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+
+       return len;
 }
 
 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
@@ -2630,9 +2695,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
        chip = &s_chip_defs[dev_data->chip_id];
        chip_platform = &chip->per_platform[dev_data->platform_id];
 
-       if (dump)
-               DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
-
        while (input_offset <
               s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
                const struct dbg_dump_split_hdr *split_hdr;
@@ -2966,22 +3028,12 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
 
                offset += qed_dump_str_param(dump_buf + offset,
                                             dump, "name", buf);
-               if (dump)
-                       DP_VERBOSE(p_hwfn,
-                                  QED_MSG_DEBUG,
-                                  "Dumping %d registers from %s...\n",
-                                  len, buf);
        } else {
                /* Dump address */
                u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
 
                offset += qed_dump_num_param(dump_buf + offset,
                                             dump, "addr", addr_in_bytes);
-               if (dump && len > 64)
-                       DP_VERBOSE(p_hwfn,
-                                  QED_MSG_DEBUG,
-                                  "Dumping %d registers from address 0x%x...\n",
-                                  len, addr_in_bytes);
        }
 
        /* Dump len */
@@ -3530,17 +3582,16 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
        u8 rss_mem_id;
 
        for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
-               u32 rss_addr, num_entries, entry_width, total_dwords, i;
+               u32 rss_addr, num_entries, total_dwords;
                struct rss_mem_defs *rss_defs;
-               u32 addr, size;
+               u32 addr, num_dwords_to_read;
                bool packed;
 
                rss_defs = &s_rss_mem_defs[rss_mem_id];
                rss_addr = rss_defs->addr;
                num_entries = rss_defs->num_entries[dev_data->chip_id];
-               entry_width = rss_defs->entry_width[dev_data->chip_id];
-               total_dwords = (num_entries * entry_width) / 32;
-               packed = (entry_width == 16);
+               total_dwords = (num_entries * rss_defs->entry_width) / 32;
+               packed = (rss_defs->entry_width == 16);
 
                offset += qed_grc_dump_mem_hdr(p_hwfn,
                                               dump_buf + offset,
@@ -3548,7 +3599,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
                                               rss_defs->mem_name,
                                               0,
                                               total_dwords,
-                                              entry_width,
+                                              rss_defs->entry_width,
                                               packed,
                                               rss_defs->type_name, false, 0);
 
@@ -3559,16 +3610,20 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
                }
 
                addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
-               size = RSS_REG_RSS_RAM_DATA_SIZE;
-               for (i = 0; i < total_dwords; i += size, rss_addr++) {
+               while (total_dwords) {
+                       num_dwords_to_read = min_t(u32,
+                                                  RSS_REG_RSS_RAM_DATA_SIZE,
+                                                  total_dwords);
                        qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
                        offset += qed_grc_dump_addr_range(p_hwfn,
                                                          p_ptt,
                                                          dump_buf + offset,
                                                          dump,
                                                          addr,
-                                                         size,
+                                                         num_dwords_to_read,
                                                          false);
+                       total_dwords -= num_dwords_to_read;
+                       rss_addr++;
                }
        }
 
@@ -3581,14 +3636,18 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
                                u32 *dump_buf, bool dump, u8 big_ram_id)
 {
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-       u32 total_blocks, ram_size, offset = 0, i;
+       u32 block_size, ram_size, offset = 0, reg_val, i;
        char mem_name[12] = "???_BIG_RAM";
        char type_name[8] = "???_RAM";
        struct big_ram_defs *big_ram;
 
        big_ram = &s_big_ram_defs[big_ram_id];
-       total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
-       ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+       ram_size = big_ram->ram_size[dev_data->chip_id];
+
+       reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
+       block_size = reg_val &
+                    BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
+                                                                        : 128;
 
        strncpy(type_name, big_ram->instance_name,
                strlen(big_ram->instance_name));
@@ -3602,7 +3661,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
                                       mem_name,
                                       0,
                                       ram_size,
-                                      BIG_RAM_BLOCK_SIZE_BYTES * 8,
+                                      block_size * 8,
                                       false, type_name, false, 0);
 
        /* Read and dump Big RAM data */
@@ -3610,12 +3669,13 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
                return offset + ram_size;
 
        /* Dump Big RAM */
-       for (i = 0; i < total_blocks / 2; i++) {
+       for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
+            i++) {
                u32 addr, len;
 
                qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
                addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
-               len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+               len = BRB_REG_BIG_RAM_DATA_SIZE;
                offset += qed_grc_dump_addr_range(p_hwfn,
                                                  p_ptt,
                                                  dump_buf + offset,
@@ -3649,7 +3709,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
                                   dump,
                                   NULL,
                                   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
-                                  MCP_REG_SCRATCH_SIZE,
+                                  MCP_REG_SCRATCH_SIZE_BB_K2,
                                   false, 0, false, "MCP", false, 0);
 
        /* Dump MCP cpu_reg_file */
@@ -3710,7 +3770,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
                               phy_defs->tbus_data_lo_addr;
                data_hi_addr = phy_defs->base_addr +
                               phy_defs->tbus_data_hi_addr;
-               bytes_buf = (u8 *)(dump_buf + offset);
 
                if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
                             phy_defs->phy_name) < 0)
@@ -3730,6 +3789,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
                        continue;
                }
 
+               bytes_buf = (u8 *)(dump_buf + offset);
                for (tbus_hi_offset = 0;
                     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
                     tbus_hi_offset++) {
@@ -3778,19 +3838,17 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
        struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
        u32 block_id, line_id, offset = 0;
 
-       /* Skip static debug if a debug bus recording is in progress */
-       if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
+       /* Don't dump static debug if a debug bus recording is in progress */
+       if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
                return 0;
 
        if (dump) {
-               DP_VERBOSE(p_hwfn,
-                          QED_MSG_DEBUG, "Dumping static debug data...\n");
-
                /* Disable all blocks debug output */
                for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
                        struct block_defs *block = s_block_defs[block_id];
 
-                       if (block->has_dbg_bus[dev_data->chip_id])
+                       if (block->dbg_client_id[dev_data->chip_id] !=
+                           MAX_DBG_BUS_CLIENTS)
                                qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
                                       0);
                }
@@ -3811,12 +3869,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
                u32 block_dwords, addr, len;
                u8 dbg_client_id;
 
-               if (!block->has_dbg_bus[dev_data->chip_id])
+               if (block->dbg_client_id[dev_data->chip_id] ==
+                   MAX_DBG_BUS_CLIENTS)
                        continue;
 
-               block_desc =
-                       get_dbg_bus_block_desc(p_hwfn,
-                                              (enum block_id)block_id);
+               block_desc = get_dbg_bus_block_desc(p_hwfn,
+                                                   (enum block_id)block_id);
                block_dwords = NUM_DBG_LINES(block_desc) *
                               STATIC_DEBUG_LINE_DWORDS;
 
@@ -4044,7 +4102,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
                                                    dump_buf + offset, dump);
 
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        if (dump) {
                /* Unstall storms */
@@ -4253,30 +4311,33 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                if (!check_rule && dump)
                        continue;
 
+               if (!dump) {
+                       u32 entry_dump_size =
+                               qed_idle_chk_dump_failure(p_hwfn,
+                                                         p_ptt,
+                                                         dump_buf + offset,
+                                                         false,
+                                                         rule->rule_id,
+                                                         rule,
+                                                         0,
+                                                         NULL);
+
+                       offset += num_reg_entries * entry_dump_size;
+                       (*num_failing_rules) += num_reg_entries;
+                       continue;
+               }
+
                /* Go over all register entries (number of entries is the same
                 * for all condition registers).
                 */
                for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
                        u32 next_reg_offset = 0;
 
-                       if (!dump) {
-                               offset += qed_idle_chk_dump_failure(p_hwfn,
-                                                       p_ptt,
-                                                       dump_buf + offset,
-                                                       false,
-                                                       rule->rule_id,
-                                                       rule,
-                                                       entry_id,
-                                                       NULL);
-                               (*num_failing_rules)++;
-                               break;
-                       }
-
                        /* Read current entry of all condition registers */
                        for (reg_id = 0; reg_id < rule->num_cond_regs;
                             reg_id++) {
                                const struct dbg_idle_chk_cond_reg *reg =
-                                   &cond_regs[reg_id];
+                                       &cond_regs[reg_id];
                                u32 padded_entry_size, addr;
                                bool wide_bus;
 
@@ -4291,9 +4352,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                                if (reg->num_entries > 1 ||
                                    reg->start_entry > 0) {
                                        padded_entry_size =
-                                           reg->entry_size > 1 ?
-                                           roundup_pow_of_two(reg->entry_size)
-                                           : 1;
+                                          reg->entry_size > 1 ?
+                                          roundup_pow_of_two(reg->entry_size) :
+                                          1;
                                        addr += (reg->start_entry + entry_id) *
                                                padded_entry_size;
                                }
@@ -4329,7 +4390,6 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                                                        entry_id,
                                                        cond_reg_values);
                                (*num_failing_rules)++;
-                               break;
                        }
                }
        }
@@ -4402,7 +4462,7 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
                                   dump, "num_rules", num_failing_rules);
 
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        return offset;
 }
@@ -4474,7 +4534,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
                                       (nvram_offset_bytes +
                                        read_offset) |
                                       (bytes_to_copy <<
-                                       DRV_MB_PARAM_NVM_LEN_SHIFT),
+                                       DRV_MB_PARAM_NVM_LEN_OFFSET),
                                       &ret_mcp_resp, &ret_mcp_param,
                                       &ret_read_size,
                                       (u32 *)((u8 *)ret_buf + read_offset)))
@@ -4701,7 +4761,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
                offset += trace_meta_size_dwords;
 
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        *num_dumped_dwords = offset;
 
@@ -4717,7 +4777,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
                                         u32 *dump_buf,
                                         bool dump, u32 *num_dumped_dwords)
 {
-       u32 dwords_read, size_param_offset, offset = 0;
+       u32 dwords_read, size_param_offset, offset = 0, addr, len;
        bool fifo_has_data;
 
        *num_dumped_dwords = 0;
@@ -4753,14 +4813,18 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
         * buffer size since more entries could be added to the buffer as we are
         * emptying it.
         */
+       addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
+       len = REG_FIFO_ELEMENT_DWORDS;
        for (dwords_read = 0;
             fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
-            dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
-            REG_FIFO_ELEMENT_DWORDS) {
-               if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
-                                     (u64)(uintptr_t)(&dump_buf[offset]),
-                                     REG_FIFO_ELEMENT_DWORDS, 0))
-                       return DBG_STATUS_DMAE_FAILED;
+            dwords_read += REG_FIFO_ELEMENT_DWORDS) {
+               offset += qed_grc_dump_addr_range(p_hwfn,
+                                                 p_ptt,
+                                                 dump_buf + offset,
+                                                 true,
+                                                 addr,
+                                                 len,
+                                                 true);
                fifo_has_data = qed_rd(p_hwfn, p_ptt,
                                       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
        }
@@ -4769,7 +4833,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
                           dwords_read);
 out:
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        *num_dumped_dwords = offset;
 
@@ -4782,7 +4846,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
                                         u32 *dump_buf,
                                         bool dump, u32 *num_dumped_dwords)
 {
-       u32 dwords_read, size_param_offset, offset = 0;
+       u32 dwords_read, size_param_offset, offset = 0, addr, len;
        bool fifo_has_data;
 
        *num_dumped_dwords = 0;
@@ -4818,16 +4882,19 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
         * buffer size since more entries could be added to the buffer as we are
         * emptying it.
         */
+       addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
+       len = IGU_FIFO_ELEMENT_DWORDS;
        for (dwords_read = 0;
             fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
-            dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
-            IGU_FIFO_ELEMENT_DWORDS) {
-               if (qed_dmae_grc2host(p_hwfn, p_ptt,
-                                     IGU_REG_ERROR_HANDLING_MEMORY,
-                                     (u64)(uintptr_t)(&dump_buf[offset]),
-                                     IGU_FIFO_ELEMENT_DWORDS, 0))
-                       return DBG_STATUS_DMAE_FAILED;
-               fifo_has_data = qed_rd(p_hwfn, p_ptt,
+            dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
+               offset += qed_grc_dump_addr_range(p_hwfn,
+                                                 p_ptt,
+                                                 dump_buf + offset,
+                                                 true,
+                                                 addr,
+                                                 len,
+                                                 true);
+               fifo_has_data = qed_rd(p_hwfn, p_ptt,
                                       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
        }
 
@@ -4835,7 +4902,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
                           dwords_read);
 out:
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        *num_dumped_dwords = offset;
 
@@ -4849,7 +4916,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
                                                    bool dump,
                                                    u32 *num_dumped_dwords)
 {
-       u32 size_param_offset, override_window_dwords, offset = 0;
+       u32 size_param_offset, override_window_dwords, offset = 0, addr;
 
        *num_dumped_dwords = 0;
 
@@ -4875,20 +4942,21 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
 
        /* Add override window info to buffer */
        override_window_dwords =
-               qed_rd(p_hwfn, p_ptt,
-                      GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
-                      PROTECTION_OVERRIDE_ELEMENT_DWORDS;
-       if (qed_dmae_grc2host(p_hwfn, p_ptt,
-                             GRC_REG_PROTECTION_OVERRIDE_WINDOW,
-                             (u64)(uintptr_t)(dump_buf + offset),
-                             override_window_dwords, 0))
-               return DBG_STATUS_DMAE_FAILED;
-       offset += override_window_dwords;
+               qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+               PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+       addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+       offset += qed_grc_dump_addr_range(p_hwfn,
+                                         p_ptt,
+                                         dump_buf + offset,
+                                         true,
+                                         addr,
+                                         override_window_dwords,
+                                         true);
        qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
                           override_window_dwords);
 out:
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        *num_dumped_dwords = offset;
 
@@ -4952,9 +5020,9 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
                next_list_idx_addr = fw_asserts_section_addr +
                        DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
                next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
-               last_list_idx = (next_list_idx > 0
-                                ? next_list_idx
-                                : asserts->list_num_elements) - 1;
+               last_list_idx = (next_list_idx > 0 ?
+                                next_list_idx :
+                                asserts->list_num_elements) - 1;
                addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
                       asserts->list_dword_offset +
                       last_list_idx * asserts->list_element_dword_size;
@@ -4967,7 +5035,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
        }
 
        /* Dump last section */
-       offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+       offset += qed_dump_last_section(dump_buf, offset, dump);
 
        return offset;
 }
@@ -5596,10 +5664,6 @@ struct igu_fifo_addr_data {
 
 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR        4
 
-/********************************* Macros ************************************/
-
-#define BYTES_TO_DWORDS(bytes)                 ((bytes) / BYTES_IN_DWORD)
-
 /***************************** Constant Arrays *******************************/
 
 struct user_dbg_array {
@@ -5698,6 +5762,7 @@ static struct block_info s_block_info_arr[] = {
        {"phy_pcie", BLOCK_PHY_PCIE},
        {"led", BLOCK_LED},
        {"avs_wrap", BLOCK_AVS_WRAP},
+       {"pxpreqbus", BLOCK_PXPREQBUS},
        {"misc_aeu", BLOCK_MISC_AEU},
        {"bar0_map", BLOCK_BAR0_MAP}
 };
@@ -5830,8 +5895,8 @@ static const char * const s_status_str[] = {
        /* DBG_STATUS_MCP_COULD_NOT_RESUME */
        "Failed to resume MCP after halt",
 
-       /* DBG_STATUS_DMAE_FAILED */
-       "DMAE transaction failed",
+       /* DBG_STATUS_RESERVED2 */
+       "Reserved debug status - shouldn't be returned",
 
        /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
        "Failed to empty SEMI sync FIFO",
@@ -6109,6 +6174,7 @@ static u32 qed_read_param(u32 *dump_buf,
        if (*(char_buf + offset++)) {
                /* String param */
                *param_str_val = char_buf + offset;
+               *param_num_val = 0;
                offset += strlen(*param_str_val) + 1;
                if (offset & 0x3)
                        offset += (4 - (offset & 0x3));
@@ -6177,8 +6243,7 @@ static u32 qed_print_section_params(u32 *dump_buf,
 /* Parses the idle check rules and returns the number of characters printed.
  * In case of parsing error, returns 0.
  */
-static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
-                                        u32 *dump_buf,
+static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
                                         u32 *dump_buf_end,
                                         u32 num_rules,
                                         bool print_fw_idle_chk,
@@ -6322,8 +6387,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
-                                              u32 *dump_buf,
+static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
                                               u32 num_dumped_dwords,
                                               char *results_buf,
                                               u32 *parsed_results_bytes,
@@ -6375,13 +6439,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
                                            results_offset),
                            "FW_IDLE_CHECK:\n");
                rules_print_size =
-                       qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
-                                                     dump_buf_end, num_rules,
+                       qed_parse_idle_chk_dump_rules(dump_buf,
+                                                     dump_buf_end,
+                                                     num_rules,
                                                      true,
                                                      results_buf ?
                                                      results_buf +
-                                                     results_offset : NULL,
-                                                     num_errors, num_warnings);
+                                                     results_offset :
+                                                     NULL,
+                                                     num_errors,
+                                                     num_warnings);
                results_offset += rules_print_size;
                if (!rules_print_size)
                        return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6392,13 +6459,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
                                            results_offset),
                            "\nLSI_IDLE_CHECK:\n");
                rules_print_size =
-                       qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
-                                                     dump_buf_end, num_rules,
+                       qed_parse_idle_chk_dump_rules(dump_buf,
+                                                     dump_buf_end,
+                                                     num_rules,
                                                      false,
                                                      results_buf ?
                                                      results_buf +
-                                                     results_offset : NULL,
-                                                     num_errors, num_warnings);
+                                                     results_offset :
+                                                     NULL,
+                                                     num_errors,
+                                                     num_warnings);
                results_offset += rules_print_size;
                if (!rules_print_size)
                        return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6537,7 +6607,6 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
  */
 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
                                                u32 *dump_buf,
-                                               u32 num_dumped_dwords,
                                                char *results_buf,
                                                u32 *parsed_results_bytes)
 {
@@ -6725,9 +6794,7 @@ free_mem:
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
-                                              u32 *dump_buf,
-                                              u32 num_dumped_dwords,
+static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
                                               char *results_buf,
                                               u32 *parsed_results_bytes)
 {
@@ -6834,8 +6901,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
                                                  *element, char
                                                  *results_buf,
-                                                 u32 *results_offset,
-                                                 u32 *parsed_results_bytes)
+                                                 u32 *results_offset)
 {
        const struct igu_fifo_addr_data *found_addr = NULL;
        u8 source, err_type, i, is_cleanup;
@@ -6933,9 +6999,9 @@ static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
                                "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
                                prod_cons,
                                update_flag ? "update" : "nop",
-                               en_dis_int_for_sb
-                               ? (en_dis_int_for_sb == 1 ? "disable" : "nop")
-                               "enable",
+                               en_dis_int_for_sb ?
+                               (en_dis_int_for_sb == 1 ? "disable" : "nop") :
+                               "enable",
                                segment ? "attn" : "regular",
                                timer_mask);
                }
@@ -6969,9 +7035,7 @@ out:
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
-                                              u32 *dump_buf,
-                                              u32 num_dumped_dwords,
+static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
                                               char *results_buf,
                                               u32 *parsed_results_bytes)
 {
@@ -7011,8 +7075,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
        for (i = 0; i < num_elements; i++) {
                status = qed_parse_igu_fifo_element(&elements[i],
                                                    results_buf,
-                                                   &results_offset,
-                                                   parsed_results_bytes);
+                                                   &results_offset);
                if (status != DBG_STATUS_OK)
                        return status;
        }
@@ -7028,9 +7091,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 }
 
 static enum dbg_status
-qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
-                                  u32 *dump_buf,
-                                  u32 num_dumped_dwords,
+qed_parse_protection_override_dump(u32 *dump_buf,
                                   char *results_buf,
                                   u32 *parsed_results_bytes)
 {
@@ -7105,9 +7166,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
-                                                u32 *dump_buf,
-                                                u32 num_dumped_dwords,
+static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
                                                 char *results_buf,
                                                 u32 *parsed_results_bytes)
 {
@@ -7209,8 +7268,7 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
 {
        u32 num_errors, num_warnings;
 
-       return qed_parse_idle_chk_dump(p_hwfn,
-                                      dump_buf,
+       return qed_parse_idle_chk_dump(dump_buf,
                                       num_dumped_dwords,
                                       NULL,
                                       results_buf_size,
@@ -7221,12 +7279,12 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
                                           u32 *dump_buf,
                                           u32 num_dumped_dwords,
                                           char *results_buf,
-                                          u32 *num_errors, u32 *num_warnings)
+                                          u32 *num_errors,
+                                          u32 *num_warnings)
 {
        u32 parsed_buf_size;
 
-       return qed_parse_idle_chk_dump(p_hwfn,
-                                      dump_buf,
+       return qed_parse_idle_chk_dump(dump_buf,
                                       num_dumped_dwords,
                                       results_buf,
                                       &parsed_buf_size,
@@ -7245,9 +7303,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
                                                   u32 *results_buf_size)
 {
        return qed_parse_mcp_trace_dump(p_hwfn,
-                                       dump_buf,
-                                       num_dumped_dwords,
-                                       NULL, results_buf_size);
+                                       dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
@@ -7259,7 +7315,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
 
        return qed_parse_mcp_trace_dump(p_hwfn,
                                        dump_buf,
-                                       num_dumped_dwords,
                                        results_buf, &parsed_buf_size);
 }
 
@@ -7268,10 +7323,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
                                                  u32 num_dumped_dwords,
                                                  u32 *results_buf_size)
 {
-       return qed_parse_reg_fifo_dump(p_hwfn,
-                                      dump_buf,
-                                      num_dumped_dwords,
-                                      NULL, results_buf_size);
+       return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7281,10 +7333,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
 {
        u32 parsed_buf_size;
 
-       return qed_parse_reg_fifo_dump(p_hwfn,
-                                      dump_buf,
-                                      num_dumped_dwords,
-                                      results_buf, &parsed_buf_size);
+       return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
 }
 
 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
@@ -7292,10 +7341,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
                                                  u32 num_dumped_dwords,
                                                  u32 *results_buf_size)
 {
-       return qed_parse_igu_fifo_dump(p_hwfn,
-                                      dump_buf,
-                                      num_dumped_dwords,
-                                      NULL, results_buf_size);
+       return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7305,10 +7351,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
 {
        u32 parsed_buf_size;
 
-       return qed_parse_igu_fifo_dump(p_hwfn,
-                                      dump_buf,
-                                      num_dumped_dwords,
-                                      results_buf, &parsed_buf_size);
+       return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
 }
 
 enum dbg_status
@@ -7317,9 +7360,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
                                             u32 num_dumped_dwords,
                                             u32 *results_buf_size)
 {
-       return qed_parse_protection_override_dump(p_hwfn,
-                                                 dump_buf,
-                                                 num_dumped_dwords,
+       return qed_parse_protection_override_dump(dump_buf,
                                                  NULL, results_buf_size);
 }
 
@@ -7330,9 +7371,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
 {
        u32 parsed_buf_size;
 
-       return qed_parse_protection_override_dump(p_hwfn,
-                                                 dump_buf,
-                                                 num_dumped_dwords,
+       return qed_parse_protection_override_dump(dump_buf,
                                                  results_buf,
                                                  &parsed_buf_size);
 }
@@ -7342,10 +7381,7 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
                                                    u32 num_dumped_dwords,
                                                    u32 *results_buf_size)
 {
-       return qed_parse_fw_asserts_dump(p_hwfn,
-                                        dump_buf,
-                                        num_dumped_dwords,
-                                        NULL, results_buf_size);
+       return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
@@ -7355,9 +7391,7 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
 {
        u32 parsed_buf_size;
 
-       return qed_parse_fw_asserts_dump(p_hwfn,
-                                        dump_buf,
-                                        num_dumped_dwords,
+       return qed_parse_fw_asserts_dump(dump_buf,
                                         results_buf, &parsed_buf_size);
 }
 
@@ -7386,30 +7420,30 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
 
        /* Go over registers with a non-zero attention status */
        for (i = 0; i < num_regs; i++) {
+               struct dbg_attn_bit_mapping *bit_mapping;
                struct dbg_attn_reg_result *reg_result;
-               struct dbg_attn_bit_mapping *mapping;
                u8 num_reg_attn, bit_idx = 0;
 
                reg_result = &results->reg_results[i];
                num_reg_attn = GET_FIELD(reg_result->data,
                                         DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
                block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
-               mapping = &((struct dbg_attn_bit_mapping *)
-                           block_attn->ptr)[reg_result->block_attn_offset];
+               bit_mapping = &((struct dbg_attn_bit_mapping *)
+                               block_attn->ptr)[reg_result->block_attn_offset];
 
                pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
 
                /* Go over attention status bits */
                for (j = 0; j < num_reg_attn; j++) {
-                       u16 attn_idx_val = GET_FIELD(mapping[j].data,
+                       u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
                                                     DBG_ATTN_BIT_MAPPING_VAL);
                        const char *attn_name, *attn_type_str, *masked_str;
-                       u32 name_offset, sts_addr;
+                       u32 attn_name_offset, sts_addr;
 
                        /* Check if bit mask should be advanced (due to unused
                         * bits).
                         */
-                       if (GET_FIELD(mapping[j].data,
+                       if (GET_FIELD(bit_mapping[j].data,
                                      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
                                bit_idx += (u8)attn_idx_val;
                                continue;
@@ -7422,9 +7456,10 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
                        }
 
                        /* Find attention name */
-                       name_offset = block_attn_name_offsets[attn_idx_val];
+                       attn_name_offset =
+                               block_attn_name_offsets[attn_idx_val];
                        attn_name = &((const char *)
-                                     pstrings->ptr)[name_offset];
+                                     pstrings->ptr)[attn_name_offset];
                        attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
                                        "Interrupt" : "Parity";
                        masked_str = reg_result->mask_val & BIT(bit_idx) ?
index 58a689f..553a6d1 100644 (file)
@@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
 /* This function reconfigures the QM pf on the fly.
  * For this purpose we:
  * 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
  * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
  * 4. activate init tool in QM_PF stage
  * 5. send an sdm_qm_cmd through rbc interface to release the QM
@@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        qed_init_clear_rt_data(p_hwfn);
 
        /* prepare QM portion of runtime array */
-       qed_qm_init_pf(p_hwfn, p_ptt);
+       qed_qm_init_pf(p_hwfn, p_ptt, false);
 
        /* activate init tool on runtime array */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                             NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
        }
 
-       /* Protocl Configuration  */
+       /* Protocol Configuration */
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
                     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
@@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
        if (rc)
                return rc;
 
+       /* Sanity check before the PF init sequence that uses DMAE */
+       rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+       if (rc)
+               return rc;
+
        /* PF Init sequence */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
        if (rc)
@@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
                        /* No need for a case for QED_CMDQS_CQS since
                         * CNQ/CMDQS are the same resource.
                         */
-                       resc_max_val = NUM_OF_CMDQS_CQS;
+                       resc_max_val = NUM_OF_GLOBAL_QUEUES;
                        break;
                case QED_RDMA_STATS_QUEUE:
                        resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
@@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
        case QED_RDMA_CNQ_RAM:
        case QED_CMDQS_CQS:
                /* CNQ/CMDQS are the same resource */
-               *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+               *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
                break;
        case QED_RDMA_STATS_QUEUE:
                *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
index df195c0..2dc9b31 100644 (file)
@@ -115,7 +115,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
        struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
        struct fcoe_init_ramrod_params *p_ramrod = NULL;
        struct fcoe_init_func_ramrod_data *p_data;
-       struct fcoe_conn_context *p_cxt = NULL;
+       struct e4_fcoe_conn_context *p_cxt = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        struct qed_cxt_info cxt_info;
@@ -167,7 +167,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
        }
        p_cxt = cxt_info.p_cxt;
        SET_FIELD(p_cxt->tstorm_ag_context.flags3,
-                 TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+                 E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
 
        fcoe_pf_params->dummy_icid = (u16)dummy_cid;
 
@@ -568,7 +568,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
 
 void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
 {
-       struct fcoe_task_context *p_task_ctx = NULL;
+       struct e4_fcoe_task_context *p_task_ctx = NULL;
        int rc;
        u32 i;
 
@@ -580,13 +580,13 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
                if (rc)
                        continue;
 
-               memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
+               memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
                SET_FIELD(p_task_ctx->timer_context.logical_client_0,
                          TIMERS_CONTEXT_VALIDLC0, 1);
                SET_FIELD(p_task_ctx->timer_context.logical_client_1,
                          TIMERS_CONTEXT_VALIDLC1, 1);
                SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
-                         TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+                         E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
        }
 }
 
index 3427fe7..de873d7 100644 (file)
@@ -54,7 +54,7 @@
 struct qed_hwfn;
 struct qed_ptt;
 
-/* opcodes for the event ring */
+/* Opcodes for the event ring */
 enum common_event_opcode {
        COMMON_EVENT_PF_START,
        COMMON_EVENT_PF_STOP,
@@ -82,487 +82,7 @@ enum common_ramrod_cmd_id {
        MAX_COMMON_RAMROD_CMD_ID
 };
 
-/* The core storm context for the Ystorm */
-struct ystorm_core_conn_st_ctx {
-       __le32 reserved[4];
-};
-
-/* The core storm context for the Pstorm */
-struct pstorm_core_conn_st_ctx {
-       __le32 reserved[4];
-};
-
-/* Core Slowpath Connection storm context of Xstorm */
-struct xstorm_core_conn_st_ctx {
-       __le32 spq_base_lo;
-       __le32 spq_base_hi;
-       struct regpair consolid_base_addr;
-       __le16 spq_cons;
-       __le16 consolid_cons;
-       __le32 reserved0[55];
-};
-
-struct xstorm_core_conn_ag_ctx {
-       u8 reserved0;
-       u8 core_state;
-       u8 flags0;
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT     0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK         0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT                1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK         0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT                2
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT     3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK         0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT                4
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK         0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT                5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK         0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT                6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK         0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT                7
-       u8 flags1;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK         0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT                0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK         0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT                1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK         0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT                2
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK             0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT            3
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK             0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT            4
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK             0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT            5
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK    0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT   6
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT     7
-       u8 flags2;
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK       0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT      0
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK       0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT      2
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK       0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT      4
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK       0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT      6
-       u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK       0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT      0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK       0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT      2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK       0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT      4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK       0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT      6
-       u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK       0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT      0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK       0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT      2
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK      0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT     4
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK      0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT     6
-       u8 flags5;
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK      0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT     0
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK      0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT     2
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK      0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT     4
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK      0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT     6
-       u8 flags6;
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK  0x3
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK              0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT             2
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK             0x3
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT            4
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK      0x3
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT     6
-       u8 flags7;
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK          0x3
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT         0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK                0x3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT       2
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK         0x3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT                4
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK             0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT            6
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK             0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT            7
-       u8 flags8;
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT    0
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT    1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT    2
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT    3
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT    4
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT    5
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT    6
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT    7
-       u8 flags9;
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK                    0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT                   0
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK                    0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT                   1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK                    0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT                   2
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK                    0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT                   3
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK                    0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT                   4
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK                    0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT                   5
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK       0x1
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT      6
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK                    0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT                   7
-       u8 flags10;
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK          0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT         0
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK   0x1
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT  1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK       0x1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT      2
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK                0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT       3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT     4
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK            0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT           5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK                0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT       6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK                0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT       7
-       u8 flags11;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK                0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT       0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK                0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT       1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK    0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT   2
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK           0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT          3
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK           0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT          4
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK           0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT          5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT     6
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK           0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT          7
-       u8 flags12;
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK          0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT         0
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK          0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT         1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT     2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT     3
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK          0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT         4
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK          0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT         5
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK          0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT         6
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK          0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT         7
-       u8 flags13;
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK          0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT         0
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK          0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT         1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT     2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT     3
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT     4
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT     5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT     6
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK      0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT     7
-       u8 flags14;
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT    0
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT    1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT    2
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT    3
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT    4
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK     0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT    5
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK      0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT     6
-       u8 byte2;
-       __le16 physical_q0;
-       __le16 consolid_prod;
-       __le16 reserved16;
-       __le16 tx_bd_cons;
-       __le16 tx_bd_or_spq_prod;
-       __le16 word5;
-       __le16 conn_dpi;
-       u8 byte3;
-       u8 byte4;
-       u8 byte5;
-       u8 byte6;
-       __le32 reg0;
-       __le32 reg1;
-       __le32 reg2;
-       __le32 reg3;
-       __le32 reg4;
-       __le32 reg5;
-       __le32 reg6;
-       __le16 word7;
-       __le16 word8;
-       __le16 word9;
-       __le16 word10;
-       __le32 reg7;
-       __le32 reg8;
-       __le32 reg9;
-       u8 byte7;
-       u8 byte8;
-       u8 byte9;
-       u8 byte10;
-       u8 byte11;
-       u8 byte12;
-       u8 byte13;
-       u8 byte14;
-       u8 byte15;
-       u8 e5_reserved;
-       __le16 word11;
-       __le32 reg10;
-       __le32 reg11;
-       __le32 reg12;
-       __le32 reg13;
-       __le32 reg14;
-       __le32 reg15;
-       __le32 reg16;
-       __le32 reg17;
-       __le32 reg18;
-       __le32 reg19;
-       __le16 word12;
-       __le16 word13;
-       __le16 word14;
-       __le16 word15;
-};
-
-struct tstorm_core_conn_ag_ctx {
-       u8 byte0;
-       u8 byte1;
-       u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1      /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1      /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1      /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1      /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1      /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1      /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3      /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
-       u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3      /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3      /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3      /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3      /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
-       u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3      /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3      /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3      /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3      /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
-       u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3      /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3      /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1      /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1      /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1      /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1      /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
-       u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1      /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1      /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1      /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1      /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1      /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1      /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1      /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1      /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
-       u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1      /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1      /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1      /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1      /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1      /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1      /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1      /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1      /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
-       __le32 reg0;
-       __le32 reg1;
-       __le32 reg2;
-       __le32 reg3;
-       __le32 reg4;
-       __le32 reg5;
-       __le32 reg6;
-       __le32 reg7;
-       __le32 reg8;
-       u8 byte2;
-       u8 byte3;
-       __le16 word0;
-       u8 byte4;
-       u8 byte5;
-       __le16 word1;
-       __le16 word2;
-       __le16 word3;
-       __le32 reg9;
-       __le32 reg10;
-};
-
-struct ustorm_core_conn_ag_ctx {
-       u8 reserved;
-       u8 byte1;
-       u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK      0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT     0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK      0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT     1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK       0x3
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT      2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK       0x3
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT      4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK       0x3
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT      6
-       u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK       0x3
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT      0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK       0x3
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT      2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK       0x3
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT      4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK       0x3
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT      6
-       u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK     0x1
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT    0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK     0x1
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT    1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK     0x1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT    2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK     0x1
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT    3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK     0x1
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT    4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK     0x1
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT    5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK     0x1
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT    6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK   0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT  7
-       u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK   0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT  0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK   0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT  1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK   0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT  2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK   0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT  3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK   0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT  4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK   0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT  5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK   0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT  6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK   0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT  7
-       u8 byte2;
-       u8 byte3;
-       __le16 word0;
-       __le16 word1;
-       __le32 rx_producers;
-       __le32 reg1;
-       __le32 reg2;
-       __le32 reg3;
-       __le16 word2;
-       __le16 word3;
-};
-
-/* The core storm context for the Mstorm */
-struct mstorm_core_conn_st_ctx {
-       __le32 reserved[24];
-};
-
-/* The core storm context for the Ustorm */
-struct ustorm_core_conn_st_ctx {
-       __le32 reserved[4];
-};
-
-/* core connection context */
-struct core_conn_context {
-       struct ystorm_core_conn_st_ctx ystorm_st_context;
-       struct regpair ystorm_st_padding[2];
-       struct pstorm_core_conn_st_ctx pstorm_st_context;
-       struct regpair pstorm_st_padding[2];
-       struct xstorm_core_conn_st_ctx xstorm_st_context;
-       struct xstorm_core_conn_ag_ctx xstorm_ag_context;
-       struct tstorm_core_conn_ag_ctx tstorm_ag_context;
-       struct ustorm_core_conn_ag_ctx ustorm_ag_context;
-       struct mstorm_core_conn_st_ctx mstorm_st_context;
-       struct ustorm_core_conn_st_ctx ustorm_st_context;
-       struct regpair ustorm_st_padding[2];
-};
-
+/* How ll2 should deal with packet upon errors */
 enum core_error_handle {
        LL2_DROP_PACKET,
        LL2_DO_NOTHING,
@@ -570,21 +90,25 @@ enum core_error_handle {
        MAX_CORE_ERROR_HANDLE
 };
 
+/* Opcodes for the event ring */
 enum core_event_opcode {
        CORE_EVENT_TX_QUEUE_START,
        CORE_EVENT_TX_QUEUE_STOP,
        CORE_EVENT_RX_QUEUE_START,
        CORE_EVENT_RX_QUEUE_STOP,
        CORE_EVENT_RX_QUEUE_FLUSH,
+       CORE_EVENT_TX_QUEUE_UPDATE,
        MAX_CORE_EVENT_OPCODE
 };
 
+/* The L4 pseudo checksum mode for Core */
 enum core_l4_pseudo_checksum_mode {
        CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
        CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
        MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
 };
 
+/* Light-L2 RX Producers in Tstorm RAM */
 struct core_ll2_port_stats {
        struct regpair gsi_invalid_hdr;
        struct regpair gsi_invalid_pkt_length;
@@ -592,6 +116,7 @@ struct core_ll2_port_stats {
        struct regpair gsi_crcchksm_error;
 };
 
+/* Ethernet TX Per Queue Stats */
 struct core_ll2_pstorm_per_queue_stat {
        struct regpair sent_ucast_bytes;
        struct regpair sent_mcast_bytes;
@@ -601,6 +126,7 @@ struct core_ll2_pstorm_per_queue_stat {
        struct regpair sent_bcast_pkts;
 };
 
+/* Light-L2 RX Producers in Tstorm RAM */
 struct core_ll2_rx_prod {
        __le16 bd_prod;
        __le16 cqe_prod;
@@ -621,6 +147,7 @@ struct core_ll2_ustorm_per_queue_stat {
        struct regpair rcv_bcast_pkts;
 };
 
+/* Core Ramrod Command IDs (light L2) */
 enum core_ramrod_cmd_id {
        CORE_RAMROD_UNUSED,
        CORE_RAMROD_RX_QUEUE_START,
@@ -628,53 +155,64 @@ enum core_ramrod_cmd_id {
        CORE_RAMROD_RX_QUEUE_STOP,
        CORE_RAMROD_TX_QUEUE_STOP,
        CORE_RAMROD_RX_QUEUE_FLUSH,
+       CORE_RAMROD_TX_QUEUE_UPDATE,
        MAX_CORE_RAMROD_CMD_ID
 };
 
+/* Core RX CQE Type for Light L2 */
 enum core_roce_flavor_type {
        CORE_ROCE,
        CORE_RROCE,
        MAX_CORE_ROCE_FLAVOR_TYPE
 };
 
+/* Specifies how ll2 should deal with packets errors: packet_too_big and
+ * no_buff.
+ */
 struct core_rx_action_on_error {
        u8 error_type;
 #define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK    0x3
-#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK   0x3
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT  2
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK  0xF
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT   0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK           0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT          2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK          0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT         4
 };
 
+/* Core RX BD for Light L2 */
 struct core_rx_bd {
        struct regpair addr;
        __le16 reserved[4];
 };
 
+/* Core RX CM offload BD for Light L2 */
 struct core_rx_bd_with_buff_len {
        struct regpair addr;
        __le16 buff_length;
        __le16 reserved[3];
 };
 
+/* Core RX CM offload BD for Light L2 */
 union core_rx_bd_union {
        struct core_rx_bd rx_bd;
        struct core_rx_bd_with_buff_len rx_bd_with_len;
 };
 
+/* Opaque Data for Light L2 RX CQE */
 struct core_rx_cqe_opaque_data {
        __le32 data[2];
 };
 
+/* Core RX CQE Type for Light L2 */
 enum core_rx_cqe_type {
-       CORE_RX_CQE_ILLIGAL_TYPE,
+       CORE_RX_CQE_ILLEGAL_TYPE,
        CORE_RX_CQE_TYPE_REGULAR,
        CORE_RX_CQE_TYPE_GSI_OFFLOAD,
        CORE_RX_CQE_TYPE_SLOW_PATH,
        MAX_CORE_RX_CQE_TYPE
 };
 
+/* Core RX CQE for Light L2 */
 struct core_rx_fast_path_cqe {
        u8 type;
        u8 placement_offset;
@@ -687,6 +225,7 @@ struct core_rx_fast_path_cqe {
        __le32 reserved1[3];
 };
 
+/* Core Rx CM offload CQE */
 struct core_rx_gsi_offload_cqe {
        u8 type;
        u8 data_length_error;
@@ -696,9 +235,11 @@ struct core_rx_gsi_offload_cqe {
        __le32 src_mac_addrhi;
        __le16 src_mac_addrlo;
        __le16 qp_id;
-       __le32 gid_dst[4];
+       __le32 src_qp;
+       __le32 reserved[3];
 };
 
+/* Core RX CQE for Light L2 */
 struct core_rx_slow_path_cqe {
        u8 type;
        u8 ramrod_cmd_id;
@@ -707,12 +248,14 @@ struct core_rx_slow_path_cqe {
        __le32 reserved1[5];
 };
 
+/* Core RX CM offload BD for Light L2 */
 union core_rx_cqe_union {
        struct core_rx_fast_path_cqe rx_cqe_fp;
        struct core_rx_gsi_offload_cqe rx_cqe_gsi;
        struct core_rx_slow_path_cqe rx_cqe_sp;
 };
 
+/* Ramrod data for rx queue start ramrod */
 struct core_rx_start_ramrod_data {
        struct regpair bd_base;
        struct regpair cqe_pbl_addr;
@@ -723,16 +266,18 @@ struct core_rx_start_ramrod_data {
        u8 complete_event_flg;
        u8 drop_ttl0_flg;
        __le16 num_of_pbl_pages;
-       u8 inner_vlan_removal_en;
+       u8 inner_vlan_stripping_en;
+       u8 report_outer_vlan;
        u8 queue_id;
        u8 main_func_queue;
        u8 mf_si_bcast_accept_all;
        u8 mf_si_mcast_accept_all;
        struct core_rx_action_on_error action_on_error;
        u8 gsi_offload_flag;
-       u8 reserved[7];
+       u8 reserved[6];
 };
 
+/* Ramrod data for rx queue stop ramrod */
 struct core_rx_stop_ramrod_data {
        u8 complete_cqe_flg;
        u8 complete_event_flg;
@@ -741,46 +286,51 @@ struct core_rx_stop_ramrod_data {
        __le16 reserved2[2];
 };
 
+/* Flags for Core TX BD */
 struct core_tx_bd_data {
        __le16 as_bitfield;
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK   0x1
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT     0
-#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK    0x1
-#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT      1
-#define CORE_TX_BD_DATA_START_BD_MASK  0x1
-#define CORE_TX_BD_DATA_START_BD_SHIFT            2
-#define CORE_TX_BD_DATA_IP_CSUM_MASK   0x1
-#define CORE_TX_BD_DATA_IP_CSUM_SHIFT             3
-#define CORE_TX_BD_DATA_L4_CSUM_MASK   0x1
-#define CORE_TX_BD_DATA_L4_CSUM_SHIFT             4
-#define CORE_TX_BD_DATA_IPV6_EXT_MASK  0x1
-#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT            5
-#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK       0x1
-#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT         6
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK           0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT          0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK            0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT           1
+#define CORE_TX_BD_DATA_START_BD_MASK                  0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT                 2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK                   0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT                  3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK                   0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT                  4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK                  0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT                 5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK               0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT              6
 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK       0x1
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
-#define CORE_TX_BD_DATA_NBDS_MASK      0xF
-#define CORE_TX_BD_DATA_NBDS_SHIFT                8
-#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT           12
-#define CORE_TX_BD_DATA_IP_LEN_MASK    0x1
-#define CORE_TX_BD_DATA_IP_LEN_SHIFT              13
-#define CORE_TX_BD_DATA_RESERVED0_MASK            0x3
-#define CORE_TX_BD_DATA_RESERVED0_SHIFT           14
-};
-
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT      7
+#define CORE_TX_BD_DATA_NBDS_MASK                      0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT                     8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK                 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT                        12
+#define CORE_TX_BD_DATA_IP_LEN_MASK                    0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT                   13
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK    0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT   14
+#define CORE_TX_BD_DATA_RESERVED0_MASK                 0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT                        15
+};
+
+/* Core TX BD for Light L2 */
 struct core_tx_bd {
        struct regpair addr;
        __le16 nbytes;
        __le16 nw_vlan_or_lb_echo;
        struct core_tx_bd_data bd_data;
        __le16 bitfield1;
-#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK        0x3FFF
-#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-#define CORE_TX_BD_TX_DST_MASK         0x3
-#define CORE_TX_BD_TX_DST_SHIFT                14
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK                0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT       0
+#define CORE_TX_BD_TX_DST_MASK                 0x3
+#define CORE_TX_BD_TX_DST_SHIFT                        14
 };
 
+/* Light L2 TX Destination */
 enum core_tx_dest {
        CORE_TX_DEST_NW,
        CORE_TX_DEST_LB,
@@ -789,6 +339,7 @@ enum core_tx_dest {
        MAX_CORE_TX_DEST
 };
 
+/* Ramrod data for tx queue start ramrod */
 struct core_tx_start_ramrod_data {
        struct regpair pbl_base_addr;
        __le16 mtu;
@@ -803,10 +354,20 @@ struct core_tx_start_ramrod_data {
        u8 resrved[3];
 };
 
+/* Ramrod data for tx queue stop ramrod */
 struct core_tx_stop_ramrod_data {
        __le32 reserved0[2];
 };
 
+/* Ramrod data for tx queue update ramrod */
+struct core_tx_update_ramrod_data {
+       u8 update_qm_pq_id_flg;
+       u8 reserved0;
+       __le16 qm_pq_id;
+       __le32 reserved1[1];
+};
+
+/* Enum flag for what type of dcb data to update */
 enum dcb_dscp_update_mode {
        DONT_UPDATE_DCB_DSCP,
        UPDATE_DCB,
@@ -815,6 +376,487 @@ enum dcb_dscp_update_mode {
        MAX_DCB_DSCP_UPDATE_MODE
 };
 
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+       __le32 spq_base_lo;
+       __le32 spq_base_hi;
+       struct regpair consolid_base_addr;
+       __le16 spq_cons;
+       __le16 consolid_cons;
+       __le32 reserved0[55];
+};
+
+struct e4_xstorm_core_conn_ag_ctx {
+       u8 reserved0;
+       u8 state;
+       u8 flags0;
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT  0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK      0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT     1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK      0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT     2
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT  3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK      0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT     4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK      0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT     5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK      0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT     6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK      0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT     7
+       u8 flags1;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK      0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT     0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK      0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT     1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK      0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT     2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK          0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT         3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK          0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT         4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK          0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT         5
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT        6
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT  7
+       u8 flags2;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT   0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT   2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT   4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK    0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT   6
+       u8 flags3;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK    0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT   0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK    0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT   2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK    0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT   4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK    0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT   6
+       u8 flags4;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK    0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT   0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK    0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT   2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK   0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT  4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK   0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT  6
+       u8 flags5;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK   0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT  0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK   0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT  2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK   0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT  4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK   0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT  6
+       u8 flags6;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK       0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT      0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK                   0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                  2
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                  0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT                 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK           0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT          6
+       u8 flags7;
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK       0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT      0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK     0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT    2
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK      0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT     4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK          0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT         6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK          0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT         7
+       u8 flags8;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+       u8 flags9;
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK                 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT                        0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK                 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT                        1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK                 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT                        2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK                 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT                        3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK                 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT                        4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK                 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT                        5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK    0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT   6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK                 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT                        7
+       u8 flags10;
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK               0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT              0
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK                0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT       1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK            0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT           2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK             0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT            3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK           0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT          4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK                 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT                        5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK             0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT            6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK             0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT            7
+       u8 flags11;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK     0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT    0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK     0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT    1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT        2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK                0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT       3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK                0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT       4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK                0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT       5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT  6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK                0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT       7
+       u8 flags12;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK       0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT      0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK       0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT      1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT  2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT  3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK       0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT      4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK       0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT      5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK       0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT      6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK       0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT      7
+       u8 flags13;
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK       0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT      0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK       0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT      1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT  2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT  3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT  4
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT  5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT  6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK   0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT  7
+       u8 flags14;
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK  0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK   0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT  6
+       u8 byte2;
+       __le16 physical_q0;
+       __le16 consolid_prod;
+       __le16 reserved16;
+       __le16 tx_bd_cons;
+       __le16 tx_bd_or_spq_prod;
+       __le16 word5;
+       __le16 conn_dpi;
+       u8 byte3;
+       u8 byte4;
+       u8 byte5;
+       u8 byte6;
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+       __le16 word7;
+       __le16 word8;
+       __le16 word9;
+       __le16 word10;
+       __le32 reg7;
+       __le32 reg8;
+       __le32 reg9;
+       u8 byte7;
+       u8 byte8;
+       u8 byte9;
+       u8 byte10;
+       u8 byte11;
+       u8 byte12;
+       u8 byte13;
+       u8 byte14;
+       u8 byte15;
+       u8 e5_reserved;
+       __le16 word11;
+       __le32 reg10;
+       __le32 reg11;
+       __le32 reg12;
+       __le32 reg13;
+       __le32 reg14;
+       __le32 reg15;
+       __le32 reg16;
+       __le32 reg17;
+       __le32 reg18;
+       __le32 reg19;
+       __le16 word12;
+       __le16 word13;
+       __le16 word14;
+       __le16 word15;
+};
+
+struct e4_tstorm_core_conn_ag_ctx {
+       u8 byte0;
+       u8 byte1;
+       u8 flags0;
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK   0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT  0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK   0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT  1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK   0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT  2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK   0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT  3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK   0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT  4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK   0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT  5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT   6
+       u8 flags1;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT   0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT   2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK    0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT   4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK    0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT   6
+       u8 flags2;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK    0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT   0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK    0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT   2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK    0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT   4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK    0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT   6
+       u8 flags3;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK    0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT   0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK   0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT  2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK  0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK  0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK  0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK  0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+       u8 flags4;
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK          0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT         0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK          0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT         1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK          0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT         2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK          0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT         3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK          0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT         4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK          0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT         5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK         0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT                6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK                0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT       7
+       u8 flags5;
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT       0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT       1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT       2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT       3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK                0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT       4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK                0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT       5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK                0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT       6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK                0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT       7
+       __le32 reg0;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le32 reg4;
+       __le32 reg5;
+       __le32 reg6;
+       __le32 reg7;
+       __le32 reg8;
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       u8 byte4;
+       u8 byte5;
+       __le16 word1;
+       __le16 word2;
+       __le16 word3;
+       __le32 reg9;
+       __le32 reg10;
+};
+
+struct e4_ustorm_core_conn_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       u8 flags0;
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK   0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT  0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK   0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT  1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT   2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT   4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT   6
+       u8 flags1;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK    0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT   0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK    0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT   2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK    0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT   4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK    0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT   6
+       u8 flags2;
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK          0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT         0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK          0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT         1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK          0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT         2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK          0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT         3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK          0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT         4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK          0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT         5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK          0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT         6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK                0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT       7
+       u8 flags3;
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT       0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT       1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT       2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT       3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK                0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT       4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK                0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT       5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK                0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT       6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK                0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT       7
+       u8 byte2;
+       u8 byte3;
+       __le16 word0;
+       __le16 word1;
+       __le32 rx_producers;
+       __le32 reg1;
+       __le32 reg2;
+       __le32 reg3;
+       __le16 word2;
+       __le16 word3;
+};
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+       __le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/* core connection context */
+struct e4_core_conn_context {
+       struct ystorm_core_conn_st_ctx ystorm_st_context;
+       struct regpair ystorm_st_padding[2];
+       struct pstorm_core_conn_st_ctx pstorm_st_context;
+       struct regpair pstorm_st_padding[2];
+       struct xstorm_core_conn_st_ctx xstorm_st_context;
+       struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
+       struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
+       struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
+       struct mstorm_core_conn_st_ctx mstorm_st_context;
+       struct ustorm_core_conn_st_ctx ustorm_st_context;
+       struct regpair ustorm_st_padding[2];
+};
+
 struct eth_mstorm_per_pf_stat {
        struct regpair gre_discard_pkts;
        struct regpair vxlan_discard_pkts;
@@ -896,6 +938,50 @@ struct eth_ustorm_per_queue_stat {
        struct regpair rcv_bcast_pkts;
 };
 
+/* Event Ring VF-PF Channel data */
+struct vf_pf_channel_eqe_data {
+       struct regpair msg_addr;
+};
+
+/* Event Ring malicious VF data */
+struct malicious_vf_eqe_data {
+       u8 vf_id;
+       u8 err_id;
+       __le16 reserved[3];
+};
+
+/* Event Ring initial cleanup data */
+struct initial_cleanup_eqe_data {
+       u8 vf_id;
+       u8 reserved[7];
+};
+
+/* Event Data Union */
+union event_ring_data {
+       u8 bytes[8];
+       struct vf_pf_channel_eqe_data vf_pf_channel;
+       struct iscsi_eqe_data iscsi_info;
+       struct iscsi_connect_done_results iscsi_conn_done_info;
+       union rdma_eqe_data rdma_data;
+       struct malicious_vf_eqe_data malicious_vf;
+       struct initial_cleanup_eqe_data vf_init_cleanup;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+       u8 protocol_id;
+       u8 opcode;
+       __le16 reserved0;
+       __le16 echo;
+       u8 fw_return_code;
+       u8 flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK            0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT           0
+#define EVENT_RING_ENTRY_RESERVED1_MASK                0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT       1
+       union event_ring_data data;
+};
+
 /* Event Ring Next Page Address */
 struct event_ring_next_addr {
        struct regpair addr;
@@ -908,12 +994,21 @@ union event_ring_element {
        struct event_ring_next_addr next_addr;
 };
 
+/* Ports mode */
 enum fw_flow_ctrl_mode {
        flow_ctrl_pause,
        flow_ctrl_pfc,
        MAX_FW_FLOW_CTRL_MODE
 };
 
+/* GFT profile type */
+enum gft_profile_type {
+       GFT_PROFILE_TYPE_4_TUPLE,
+       GFT_PROFILE_TYPE_L4_DST_PORT,
+       GFT_PROFILE_TYPE_IP_DST_PORT,
+       MAX_GFT_PROFILE_TYPE
+};
+
 /* Major and Minor hsi Versions */
 struct hsi_fp_ver_struct {
        u8 minor_ver_arr[2];
@@ -921,14 +1016,14 @@ struct hsi_fp_ver_struct {
 };
 
 enum iwarp_ll2_tx_queues {
-       IWARP_LL2_IN_ORDER_TX_QUEUE =                   1,
+       IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
        IWARP_LL2_ALIGNED_TX_QUEUE,
        IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
        IWARP_LL2_ERROR,
        MAX_IWARP_LL2_TX_QUEUES
 };
 
-/* Mstorm non-triggering VF zone */
+/* Malicious VF error ID */
 enum malicious_vf_error_id {
        MALICIOUS_VF_NO_ERROR,
        VF_PF_CHANNEL_NOT_READY,
@@ -951,9 +1046,11 @@ enum malicious_vf_error_id {
        ETH_TUNN_IPV6_EXT_NBD_ERR,
        ETH_CONTROL_PACKET_VIOLATION,
        ETH_ANTI_SPOOFING_ERR,
+       ETH_PACKET_SIZE_TOO_LARGE,
        MAX_MALICIOUS_VF_ERROR_ID
 };
 
+/* Mstorm non-triggering VF zone */
 struct mstorm_non_trigger_vf_zone {
        struct eth_mstorm_per_queue_stat eth_queue_stat;
        struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
@@ -962,7 +1059,21 @@ struct mstorm_non_trigger_vf_zone {
 /* Mstorm VF zone */
 struct mstorm_vf_zone {
        struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* vlan header including TPID and TCI fields */
+struct vlan_header {
+       __le16 tpid;
+       __le16 tci;
+};
 
+/* outer tag configurations */
+struct outer_tag_config_struct {
+       u8 enable_stag_pri_change;
+       u8 pri_map_valid;
+       u8 reserved[2];
+       struct vlan_header outer_tag;
+       u8 inner_to_outer_pri_map[8];
 };
 
 /* personality per PF */
@@ -974,7 +1085,7 @@ enum personality_type {
        PERSONALITY_RDMA,
        PERSONALITY_CORE,
        PERSONALITY_ETH,
-       PERSONALITY_RESERVED4,
+       PERSONALITY_RESERVED,
        MAX_PERSONALITY_TYPE
 };
 
@@ -997,7 +1108,6 @@ struct pf_start_ramrod_data {
        struct regpair event_ring_pbl_addr;
        struct regpair consolid_q_pbl_addr;
        struct pf_start_tunnel_config tunnel_config;
-       __le32 reserved;
        __le16 event_ring_sb_id;
        u8 base_vf_id;
        u8 num_vfs;
@@ -1011,21 +1121,22 @@ struct pf_start_ramrod_data {
        u8 mf_mode;
        u8 integ_phase;
        u8 allow_npar_tx_switching;
-       u8 inner_to_outer_pri_map[8];
-       u8 pri_map_valid;
-       __le32 outer_tag;
+       u8 reserved0;
        struct hsi_fp_ver_struct hsi_fp_ver;
+       struct outer_tag_config_struct outer_tag_config;
 };
 
+/* Data for port update ramrod */
 struct protocol_dcb_data {
        u8 dcb_enable_flag;
-       u8 reserved_a;
+       u8 dscp_enable_flag;
        u8 dcb_priority;
        u8 dcb_tc;
-       u8 reserved_b;
+       u8 dscp_val;
        u8 reserved0;
 };
 
+/* Update tunnel configuration */
 struct pf_update_tunnel_config {
        u8 update_rx_pf_clss;
        u8 update_rx_def_ucast_clss;
@@ -1042,8 +1153,8 @@ struct pf_update_tunnel_config {
        __le16 reserved;
 };
 
+/* Data for port update ramrod */
 struct pf_update_ramrod_data {
-       u8 pf_id;
        u8 update_eth_dcb_data_mode;
        u8 update_fcoe_dcb_data_mode;
        u8 update_iscsi_dcb_data_mode;
@@ -1051,6 +1162,7 @@ struct pf_update_ramrod_data {
        u8 update_rroce_dcb_data_mode;
        u8 update_iwarp_dcb_data_mode;
        u8 update_mf_vlan_flag;
+       u8 update_enable_stag_pri_change;
        struct protocol_dcb_data eth_dcb_data;
        struct protocol_dcb_data fcoe_dcb_data;
        struct protocol_dcb_data iscsi_dcb_data;
@@ -1058,7 +1170,8 @@ struct pf_update_ramrod_data {
        struct protocol_dcb_data rroce_dcb_data;
        struct protocol_dcb_data iwarp_dcb_data;
        __le16 mf_vlan;
-       __le16 reserved;
+       u8 enable_stag_pri_change;
+       u8 reserved;
        struct pf_update_tunnel_config tunnel_config;
 };
 
@@ -1079,11 +1192,13 @@ enum protocol_version_array_key {
        MAX_PROTOCOL_VERSION_ARRAY_KEY
 };
 
+/* RDMA TX Stats */
 struct rdma_sent_stats {
        struct regpair sent_bytes;
        struct regpair sent_pkts;
 };
 
+/* Pstorm non-triggering VF zone */
 struct pstorm_non_trigger_vf_zone {
        struct eth_pstorm_per_queue_stat eth_queue_stat;
        struct rdma_sent_stats rdma_stats;
@@ -1103,11 +1218,34 @@ struct ramrod_header {
        __le16 echo;
 };
 
+/* RDMA RX Stats */
 struct rdma_rcv_stats {
        struct regpair rcv_bytes;
        struct regpair rcv_pkts;
 };
 
+/* Data for update QCN/DCQCN RL ramrod */
+struct rl_update_ramrod_data {
+       u8 qcn_update_param_flg;
+       u8 dcqcn_update_param_flg;
+       u8 rl_init_flg;
+       u8 rl_start_flg;
+       u8 rl_stop_flg;
+       u8 rl_id_first;
+       u8 rl_id_last;
+       u8 rl_dc_qcn_flg;
+       __le32 rl_bc_rate;
+       __le16 rl_max_rate;
+       __le16 rl_r_ai;
+       __le16 rl_r_hai;
+       __le16 dcqcn_g;
+       __le32 dcqcn_k_us;
+       __le32 dcqcn_timeuot_us;
+       __le32 qcn_timeuot_us;
+       __le32 reserved[2];
+};
+
+/* Slowpath Element (SPQE) */
 struct slow_path_element {
        struct ramrod_header hdr;
        struct regpair data_ptr;
@@ -1130,11 +1268,12 @@ struct tstorm_per_port_stat {
        struct regpair roce_irregular_pkt;
        struct regpair iwarp_irregular_pkt;
        struct regpair eth_irregular_pkt;
-       struct regpair reserved1;
+       struct regpair toe_irregular_pkt;
        struct regpair preroce_irregular_pkt;
        struct regpair eth_gre_tunn_filter_discard;
        struct regpair eth_vxlan_tunn_filter_discard;
        struct regpair eth_geneve_tunn_filter_discard;
+       struct regpair eth_gft_drop_pkt;
 };
 
 /* Tstorm VF zone */
@@ -1197,6 +1336,7 @@ struct vf_stop_ramrod_data {
        __le32 reserved2;
 };
 
+/* VF zone size mode */
 enum vf_zone_size_mode {
        VF_ZONE_SIZE_MODE_DEFAULT,
        VF_ZONE_SIZE_MODE_DOUBLE,
@@ -1204,6 +1344,7 @@ enum vf_zone_size_mode {
        MAX_VF_ZONE_SIZE_MODE
 };
 
+/* Attentions status block */
 struct atten_status_block {
        __le32 atten_bits;
        __le32 atten_ack;
@@ -1212,12 +1353,6 @@ struct atten_status_block {
        __le32 reserved1;
 };
 
-enum command_type_bit {
-       IGU_COMMAND_TYPE_NOP = 0,
-       IGU_COMMAND_TYPE_SET = 1,
-       MAX_COMMAND_TYPE_BIT
-};
-
 /* DMAE command */
 struct dmae_cmd {
        __le32 opcode;
@@ -1327,74 +1462,74 @@ enum dmae_cmd_src_enum {
        MAX_DMAE_CMD_SRC_ENUM
 };
 
-struct mstorm_core_conn_ag_ctx {
+struct e4_mstorm_core_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK      0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT     0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK      0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT     1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK       0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT      2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK       0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT      4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK       0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT      6
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK   0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT  0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK   0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT  1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT   2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT   4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT   6
        u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK     0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT    0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK     0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT    1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK     0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT    2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK   0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT  3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK   0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT  4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK   0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT  5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK   0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT  6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK   0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT  7
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK          0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT         0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK          0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT         1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK          0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT         2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK                0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT       3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT       4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT       5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT       6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT       7
        __le16 word0;
        __le16 word1;
        __le32 reg0;
        __le32 reg1;
 };
 
-struct ystorm_core_conn_ag_ctx {
+struct e4_ystorm_core_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK      0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT     0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK      0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT     1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK       0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT      2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK       0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT      4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK       0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT      6
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK   0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT  0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK   0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT  1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT   2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT   4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT   6
        u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK     0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT    0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK     0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT    1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK     0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT    2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK   0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT  3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK   0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT  4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK   0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT  5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK   0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT  6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK   0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT  7
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK          0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT         0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK          0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT         1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK          0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT         2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK                0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT       3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT       4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT       5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT       6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT       7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -1545,22 +1680,22 @@ struct qm_rf_opportunistic_mask {
 };
 
 /* QM hardware structure of QM map memory */
-struct qm_rf_pq_map {
+struct qm_rf_pq_map_e4 {
        __le32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK             0x1
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT            0
-#define QM_RF_PQ_MAP_RL_ID_MASK                        0xFF
-#define QM_RF_PQ_MAP_RL_ID_SHIFT               1
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK             0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT            9
-#define QM_RF_PQ_MAP_VOQ_MASK                  0x1F
-#define QM_RF_PQ_MAP_VOQ_SHIFT                 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK     0x3
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT    23
-#define QM_RF_PQ_MAP_RL_VALID_MASK             0x1
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT            25
-#define QM_RF_PQ_MAP_RESERVED_MASK             0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT            26
+#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK          0x1
+#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT         0
+#define QM_RF_PQ_MAP_E4_RL_ID_MASK             0xFF
+#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT            1
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK          0x1FF
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT         9
+#define QM_RF_PQ_MAP_E4_VOQ_MASK               0x1F
+#define QM_RF_PQ_MAP_E4_VOQ_SHIFT              18
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK  0x3
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_E4_RL_VALID_MASK          0x1
+#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT         25
+#define QM_RF_PQ_MAP_E4_RESERVED_MASK          0x3F
+#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT         26
 };
 
 /* Completion params for aggregated interrupt completion */
@@ -1643,8 +1778,8 @@ enum block_addr {
        GRCBASE_MULD = 0x4e0000,
        GRCBASE_YULD = 0x4c8000,
        GRCBASE_XYLD = 0x4c0000,
-       GRCBASE_PTLD = 0x590000,
-       GRCBASE_YPLD = 0x5b0000,
+       GRCBASE_PTLD = 0x5a0000,
+       GRCBASE_YPLD = 0x5c0000,
        GRCBASE_PRM = 0x230000,
        GRCBASE_PBF_PB1 = 0xda0000,
        GRCBASE_PBF_PB2 = 0xda4000,
@@ -1675,6 +1810,7 @@ enum block_addr {
        GRCBASE_PHY_PCIE = 0x620000,
        GRCBASE_LED = 0x6b8000,
        GRCBASE_AVS_WRAP = 0x6b0000,
+       GRCBASE_PXPREQBUS = 0x56000,
        GRCBASE_MISC_AEU = 0x8000,
        GRCBASE_BAR0_MAP = 0x1c00000,
        MAX_BLOCK_ADDR
@@ -1766,6 +1902,7 @@ enum block_id {
        BLOCK_PHY_PCIE,
        BLOCK_LED,
        BLOCK_AVS_WRAP,
+       BLOCK_PXPREQBUS,
        BLOCK_MISC_AEU,
        BLOCK_BAR0_MAP,
        MAX_BLOCK_ID
@@ -1841,7 +1978,7 @@ struct dbg_attn_block_result {
        struct dbg_attn_reg_result reg_results[15];
 };
 
-/* mode header */
+/* Mode header */
 struct dbg_mode_hdr {
        __le16 data;
 #define DBG_MODE_HDR_EVAL_MODE_MASK            0x1
@@ -1863,80 +2000,83 @@ struct dbg_attn_reg {
        __le32 mask_address;
 };
 
-/* attention types */
+/* Attention types */
 enum dbg_attn_type {
        ATTN_TYPE_INTERRUPT,
        ATTN_TYPE_PARITY,
        MAX_DBG_ATTN_TYPE
 };
 
+/* Debug Bus block data */
 struct dbg_bus_block {
        u8 num_of_lines;
        u8 has_latency_events;
        __le16 lines_offset;
 };
 
+/* Debug Bus block user data */
 struct dbg_bus_block_user_data {
        u8 num_of_lines;
        u8 has_latency_events;
        __le16 names_offset;
 };
 
+/* Block Debug line data */
 struct dbg_bus_line {
        u8 data;
-#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK  0xF
-#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
-#define DBG_BUS_LINE_IS_256B_MASK        0x1
-#define DBG_BUS_LINE_IS_256B_SHIFT       4
-#define DBG_BUS_LINE_RESERVED_MASK       0x7
-#define DBG_BUS_LINE_RESERVED_SHIFT      5
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK                0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT       0
+#define DBG_BUS_LINE_IS_256B_MASK              0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT             4
+#define DBG_BUS_LINE_RESERVED_MASK             0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT            5
        u8 group_sizes;
 };
 
-/* condition header for registers dump */
+/* Condition header for registers dump */
 struct dbg_dump_cond_hdr {
        struct dbg_mode_hdr mode; /* Mode header */
        u8 block_id; /* block ID */
        u8 data_size; /* size in dwords of the data following this header */
 };
 
-/* memory data for registers dump */
+/* Memory data for registers dump */
 struct dbg_dump_mem {
        __le32 dword0;
-#define DBG_DUMP_MEM_ADDRESS_MASK       0xFFFFFF
-#define DBG_DUMP_MEM_ADDRESS_SHIFT      0
-#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK  0xFF
-#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+#define DBG_DUMP_MEM_ADDRESS_MASK      0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT     0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT        24
        __le32 dword1;
-#define DBG_DUMP_MEM_LENGTH_MASK        0xFFFFFF
-#define DBG_DUMP_MEM_LENGTH_SHIFT       0
-#define DBG_DUMP_MEM_WIDE_BUS_MASK      0x1
-#define DBG_DUMP_MEM_WIDE_BUS_SHIFT     24
-#define DBG_DUMP_MEM_RESERVED_MASK      0x7F
-#define DBG_DUMP_MEM_RESERVED_SHIFT     25
+#define DBG_DUMP_MEM_LENGTH_MASK       0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT      0
+#define DBG_DUMP_MEM_WIDE_BUS_MASK     0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT    24
+#define DBG_DUMP_MEM_RESERVED_MASK     0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT    25
 };
 
-/* register data for registers dump */
+/* Register data for registers dump */
 struct dbg_dump_reg {
        __le32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */
-#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
-#define DBG_DUMP_REG_LENGTH_MASK  0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_ADDRESS_MASK      0x7FFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT     0
+#define DBG_DUMP_REG_WIDE_BUS_MASK     0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT    23
+#define DBG_DUMP_REG_LENGTH_MASK       0xFF
+#define DBG_DUMP_REG_LENGTH_SHIFT      24
 };
 
-/* split header for registers dump */
+/* Split header for registers dump */
 struct dbg_dump_split_hdr {
        __le32 hdr;
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK      0xFFFFFF
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT     0
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK  0xFF
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK      0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT     0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK  0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
 };
 
-/* condition header for idle check */
+/* Condition header for idle check */
 struct dbg_idle_chk_cond_hdr {
        struct dbg_mode_hdr mode; /* Mode header */
        __le16 data_size; /* size in dwords of the data following this header */
@@ -1945,12 +2085,12 @@ struct dbg_idle_chk_cond_hdr {
 /* Idle Check condition register */
 struct dbg_idle_chk_cond_reg {
        __le32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK   0x7FFFFF
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT  0
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK  0x1
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK  0xFF
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK     0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT    0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK    0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT   23
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK    0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT   24
        __le16 num_entries;
        u8 entry_size;
        u8 start_entry;
@@ -1959,12 +2099,12 @@ struct dbg_idle_chk_cond_reg {
 /* Idle Check info register */
 struct dbg_idle_chk_info_reg {
        __le32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK   0x7FFFFF
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT  0
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK  0x1
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK  0xFF
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK     0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT    0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK    0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT   23
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK    0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT   24
        __le16 size; /* register size in dwords */
        struct dbg_mode_hdr mode; /* Mode header */
 };
@@ -2016,13 +2156,13 @@ struct dbg_idle_chk_rule {
 /* Idle Check rule parsing data */
 struct dbg_idle_chk_rule_parsing_data {
        __le32 data;
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK  0x1
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK  0x7FFFFFFF
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT        0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT        1
 };
 
-/* idle check severity types */
+/* Idle check severity types */
 enum dbg_idle_chk_severity_types {
        /* idle check failure should cause an error */
        IDLE_CHK_SEVERITY_ERROR,
@@ -2036,14 +2176,14 @@ enum dbg_idle_chk_severity_types {
 /* Debug Bus block data */
 struct dbg_bus_block_data {
        __le16 data;
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK       0xF
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT      0
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK       0xF
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT      4
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK  0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK  0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK            0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT           0
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK            0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT           4
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK       0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT      8
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK       0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT      12
        u8 line_num;
        u8 hw_id;
 };
@@ -2072,6 +2212,7 @@ enum dbg_bus_clients {
        MAX_DBG_BUS_CLIENTS
 };
 
+/* Debug Bus constraint operation types */
 enum dbg_bus_constraint_ops {
        DBG_BUS_CONSTRAINT_OP_EQ,
        DBG_BUS_CONSTRAINT_OP_NE,
@@ -2086,12 +2227,13 @@ enum dbg_bus_constraint_ops {
        MAX_DBG_BUS_CONSTRAINT_OPS
 };
 
+/* Debug Bus trigger state data */
 struct dbg_bus_trigger_state_data {
        u8 data;
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK  0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK      0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT     4
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK      0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT     0
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK          0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT         4
 };
 
 /* Debug Bus memory address */
@@ -2165,6 +2307,7 @@ struct dbg_bus_data {
        struct dbg_bus_storm_data storms[6];
 };
 
+/* Debug bus filter types */
 enum dbg_bus_filter_types {
        DBG_BUS_FILTER_TYPE_OFF,
        DBG_BUS_FILTER_TYPE_PRE,
@@ -2181,6 +2324,7 @@ enum dbg_bus_frame_modes {
        MAX_DBG_BUS_FRAME_MODES
 };
 
+/* Debug bus other engine mode */
 enum dbg_bus_other_engine_modes {
        DBG_BUS_OTHER_ENGINE_MODE_NONE,
        DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
@@ -2190,12 +2334,14 @@ enum dbg_bus_other_engine_modes {
        MAX_DBG_BUS_OTHER_ENGINE_MODES
 };
 
+/* Debug bus post-trigger recording types */
 enum dbg_bus_post_trigger_types {
        DBG_BUS_POST_TRIGGER_RECORD,
        DBG_BUS_POST_TRIGGER_DROP,
        MAX_DBG_BUS_POST_TRIGGER_TYPES
 };
 
+/* Debug bus pre-trigger recording types */
 enum dbg_bus_pre_trigger_types {
        DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
        DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
@@ -2203,11 +2349,10 @@ enum dbg_bus_pre_trigger_types {
        MAX_DBG_BUS_PRE_TRIGGER_TYPES
 };
 
+/* Debug bus SEMI frame modes */
 enum dbg_bus_semi_frame_modes {
-       DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST =
-           0,
-       DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST =
-           3,
+       DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+       DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
        MAX_DBG_BUS_SEMI_FRAME_MODES
 };
 
@@ -2220,6 +2365,7 @@ enum dbg_bus_states {
        MAX_DBG_BUS_STATES
 };
 
+/* Debug Bus Storm modes */
 enum dbg_bus_storm_modes {
        DBG_BUS_STORM_MODE_PRINTF,
        DBG_BUS_STORM_MODE_PRAM_ADDR,
@@ -2352,7 +2498,7 @@ enum dbg_status {
        DBG_STATUS_MCP_TRACE_NO_META,
        DBG_STATUS_MCP_COULD_NOT_HALT,
        DBG_STATUS_MCP_COULD_NOT_RESUME,
-       DBG_STATUS_DMAE_FAILED,
+       DBG_STATUS_RESERVED2,
        DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
        DBG_STATUS_IGU_FIFO_BAD_DATA,
        DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -2396,7 +2542,8 @@ struct dbg_tools_data {
        u8 chip_id;
        u8 platform_id;
        u8 initialized;
-       u8 reserved;
+       u8 use_dmae;
+       __le32 num_regs_read;
 };
 
 /********************************/
@@ -2406,6 +2553,7 @@ struct dbg_tools_data {
 /* Number of VLAN priorities */
 #define NUM_OF_VLAN_PRIORITIES 8
 
+/* BRB RAM init requirements */
 struct init_brb_ram_req {
        __le32 guranteed_per_tc;
        __le32 headroom_per_tc;
@@ -2414,17 +2562,20 @@ struct init_brb_ram_req {
        u8 num_active_tcs[MAX_NUM_PORTS];
 };
 
+/* ETS per-TC init requirements */
 struct init_ets_tc_req {
        u8 use_sp;
        u8 use_wfq;
        __le16 weight;
 };
 
+/* ETS init requirements */
 struct init_ets_req {
        __le32 mtu;
        struct init_ets_tc_req tc_req[NUM_OF_TCS];
 };
 
+/* NIG LB RL init requirements */
 struct init_nig_lb_rl_req {
        __le16 lb_mac_rate;
        __le16 lb_rate;
@@ -2432,15 +2583,18 @@ struct init_nig_lb_rl_req {
        __le16 tc_rate[NUM_OF_PHYS_TCS];
 };
 
+/* NIG TC mapping for each priority */
 struct init_nig_pri_tc_map_entry {
        u8 tc_id;
        u8 valid;
 };
 
+/* NIG priority to TC map init requirements */
 struct init_nig_pri_tc_map_req {
        struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
 };
 
+/* QM per-port init parameters */
 struct init_qm_port_params {
        u8 active;
        u8 active_phys_tcs;
@@ -2563,7 +2717,7 @@ struct bin_buffer_hdr {
        __le32 length;
 };
 
-/* binary init buffer types */
+/* Binary init buffer types */
 enum bin_init_buffer_type {
        BIN_BUF_INIT_FW_VER_INFO,
        BIN_BUF_INIT_CMD,
@@ -2793,6 +2947,7 @@ struct iro {
 };
 
 /***************************** Public Functions *******************************/
+
 /**
  * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
  *     arrays.
@@ -2802,6 +2957,18 @@ struct iro {
 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
 
 /**
+ * @brief qed_read_regs - Reads registers into a buffer (using GRC).
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf - Destination buffer.
+ * @param addr - Source GRC address in dwords.
+ * @param len - Number of registers to read.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
+
+/**
  * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
  *     default value.
  *
@@ -3119,6 +3286,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
 #define MAX_NAME_LEN   16
 
 /***************************** Public Functions *******************************/
+
 /**
  * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
  *     debug arrays.
@@ -3172,6 +3340,18 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
                                           u32 *num_warnings);
 
 /**
+ * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace
+ *     meta data.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ *
+ * @param data - pointer to MCP Trace meta data
+ * @param size - size of MCP Trace meta data in dwords
+ */
+void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size);
+
+/**
  * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
  *     for MCP Trace results (in bytes).
  *
@@ -3607,6 +3787,9 @@ static const u32 dbg_bus_blocks[] = {
        0x00000000,             /* bar0_map, bb, 0 lines */
        0x00000000,             /* bar0_map, k2, 0 lines */
        0x00000000,
+       0x00000000,             /* bar0_map, bb, 0 lines */
+       0x00000000,             /* bar0_map, k2, 0 lines */
+       0x00000000,
 };
 
 /* Win 2 */
@@ -3645,7 +3828,6 @@ static const u32 dbg_bus_blocks[] = {
  * Returns the required host memory size in 4KB units.
  * Must be called before all QM init HSI functions.
  *
- * @param pf_id - physical function ID
  * @param num_pf_cids - number of connections used by this PF
  * @param num_vf_cids - number of connections used by VFs of this PF
  * @param num_tids - number of tasks used by this PF
@@ -3654,8 +3836,7 @@ static const u32 dbg_bus_blocks[] = {
  *
  * @return The required host memory size in 4KB units.
  */
-u32 qed_qm_pf_mem_size(u8 pf_id,
-                      u32 num_pf_cids,
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
                       u32 num_vf_cids,
                       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
 
@@ -3676,7 +3857,7 @@ struct qed_qm_pf_rt_init_params {
        u8 port_id;
        u8 pf_id;
        u8 max_phys_tcs_per_port;
-       bool is_first_pf;
+       bool is_pf_loading;
        u32 num_pf_cids;
        u32 num_vf_cids;
        u32 num_tids;
@@ -3687,6 +3868,7 @@ struct qed_qm_pf_rt_init_params {
        u8 num_vports;
        u16 pf_wfq;
        u32 pf_rl;
+       u32 link_speed;
        struct init_qm_pq_params *pq_params;
        struct init_qm_vport_params *vport_params;
 };
@@ -3744,11 +3926,14 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
  * @param p_ptt - ptt window used for writing the registers
  * @param vport_id - VPORT ID
  * @param vport_rl - rate limit in Mb/sec units
+ * @param link_speed - link speed in Mbps.
  *
  * @return 0 on success, -1 on error.
  */
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
-                     struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+                     struct qed_ptt *p_ptt,
+                     u8 vport_id, u32 vport_rl, u32 link_speed);
+
 /**
  * @brief qed_send_qm_stop_cmd  Sends a stop command to the QM
  *
@@ -3759,7 +3944,8 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
  * @param start_pq - first PQ ID to stop
  * @param num_pqs - Number of PQs to stop, starting from start_pq.
  *
- * @return bool, true if successful, false if timeout occured while waiting for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for
+ *     QM command done.
  */
 bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
@@ -3769,6 +3955,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param dest_port - vxlan destination udp port.
  */
@@ -3778,6 +3965,7 @@ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param vxlan_enable - vxlan enable flag.
  */
@@ -3787,6 +3975,7 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param eth_gre_enable - eth GRE enable enable flag.
  * @param ip_gre_enable - IP GRE enable enable flag.
@@ -3798,6 +3987,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param dest_port - geneve destination udp port.
  */
@@ -3814,612 +4004,921 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
 void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt,
                           bool eth_geneve_enable, bool ip_geneve_enable);
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
-                             struct qed_ptt *p_ptt, u16 pf_id);
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                            u16 pf_id, bool tcp, bool udp,
-                            bool ipv4, bool ipv6);
-
-#define        YSTORM_FLOW_CONTROL_MODE_OFFSET                 (IRO[0].base)
-#define        YSTORM_FLOW_CONTROL_MODE_SIZE                   (IRO[0].size)
-#define        TSTORM_PORT_STAT_OFFSET(port_id) \
+
+/**
+ * @brief qed_gft_disable - Disable GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable GFT.
+ */
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
+
+/**
+ * @brief qed_gft_config - Enable and configure HW for GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to enable GFT.
+ * @param tcp - set profile tcp packets.
+ * @param udp - set profile udp packet.
+ * @param ipv4 - set profile ipv4 packet.
+ * @param ipv6 - set profile ipv6 packet.
+ * @param profile_type - define packet same fields. Use enum gft_profile_type.
+ */
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   u16 pf_id,
+                   bool tcp,
+                   bool udp,
+                   bool ipv4, bool ipv6, enum gft_profile_type profile_type);
+
+/**
+ * @brief qed_enable_context_validation - Enable and configure context
+ *     validation.
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_calc_session_ctx_validation - Calculate validation byte for
+ *     session context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+                                    u16 ctx_size, u8 ctx_type, u32 cid);
+
+/**
+ * @brief qed_calc_task_ctx_validation - Calculate validation byte for task
+ *     context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+                                 u16 ctx_size, u8 ctx_type, u32 tid);
+
+/**
+ * @brief qed_memset_session_ctx - Memset session context to 0 while
+ *     preserving validation bytes.
+ *
+ * @param p_hwfn -
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+/**
+ * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
+ *     validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET                        (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE                  (IRO[0].size)
+
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
        (IRO[1].base + ((port_id) * IRO[1].m1))
-#define        TSTORM_PORT_STAT_SIZE                           (IRO[1].size)
+#define TSTORM_PORT_STAT_SIZE                          (IRO[1].size)
+
+/* Tstorm ll2 port statistics */
 #define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
        (IRO[2].base + ((port_id) * IRO[2].m1))
 #define TSTORM_LL2_PORT_STAT_SIZE                      (IRO[2].size)
-#define        USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
        (IRO[3].base + ((vf_id) * IRO[3].m1))
-#define        USTORM_VF_PF_CHANNEL_READY_SIZE                 (IRO[3].size)
-#define        USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
-       (IRO[4].base + (pf_id) * IRO[4].m1)
-#define        USTORM_FLR_FINAL_ACK_SIZE                       (IRO[4].size)
-#define        USTORM_EQE_CONS_OFFSET(pf_id) \
+#define USTORM_VF_PF_CHANNEL_READY_SIZE                        (IRO[3].size)
+
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+       (IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE                      (IRO[4].size)
+
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
        (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define        USTORM_EQE_CONS_SIZE                            (IRO[5].size)
-#define        USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+#define USTORM_EQE_CONS_SIZE                           (IRO[5].size)
+
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
        (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
-#define        USTORM_ETH_QUEUE_ZONE_SIZE                      (IRO[6].size)
-#define        USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+#define USTORM_ETH_QUEUE_ZONE_SIZE                     (IRO[6].size)
+
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
        (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
-#define        USTORM_COMMON_QUEUE_CONS_SIZE                   (IRO[7].size)
+#define USTORM_COMMON_QUEUE_CONS_SIZE                  (IRO[7].size)
+
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[8].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE                    (IRO[8].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[9].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE                    (IRO[9].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[10].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE                    (IRO[10].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[11].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE                    (IRO[11].size)
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET                  (IRO[12].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE                    (IRO[12].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET                  (IRO[13].base)
+#define USTORM_INTEG_TEST_DATA_SIZE                    (IRO[13].size)
+
+/* Tstorm producers */
 #define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
-       (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+       (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
 #define TSTORM_LL2_RX_PRODS_SIZE                       (IRO[14].size)
+
+/* Tstorm LightL2 queue statistics */
 #define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
        (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
 #define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE            (IRO[15].size)
+
+/* Ustorm LiteL2 queue statistics */
 #define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
-       (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+       (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
 #define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE            (IRO[16].size)
+
+/* Pstorm LiteL2 queue statistics */
 #define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
-       (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE    (IRO[17].       size)
-#define        MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+       (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE            (IRO[17].size)
+
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
        (IRO[18].base + ((stat_counter_id) * IRO[18].m1))
-#define        MSTORM_QUEUE_STAT_SIZE                          (IRO[18].size)
-#define        MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+#define MSTORM_QUEUE_STAT_SIZE                         (IRO[18].size)
+
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
        (IRO[19].base + ((queue_id) * IRO[19].m1))
-#define        MSTORM_ETH_PF_PRODS_SIZE                        (IRO[19].size)
+#define MSTORM_ETH_PF_PRODS_SIZE                       (IRO[19].size)
+
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
+ * mode.
+ */
 #define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
-       (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+       (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
 #define MSTORM_ETH_VF_PRODS_SIZE                       (IRO[20].size)
-#define        MSTORM_TPA_TIMEOUT_US_OFFSET                    (IRO[21].base)
-#define        MSTORM_TPA_TIMEOUT_US_SIZE                      (IRO[21].size)
-#define        MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET                   (IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE                     (IRO[21].size)
+
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
        (IRO[22].base + ((pf_id) * IRO[22].m1))
-#define        MSTORM_ETH_PF_STAT_SIZE                         (IRO[22].size)
-#define        USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define MSTORM_ETH_PF_STAT_SIZE                                (IRO[22].size)
+
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
        (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
-#define        USTORM_QUEUE_STAT_SIZE                          (IRO[23].size)
-#define        USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define USTORM_QUEUE_STAT_SIZE                         (IRO[23].size)
+
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\
        (IRO[24].base + ((pf_id) * IRO[24].m1))
-#define        USTORM_ETH_PF_STAT_SIZE                         (IRO[24].size)
-#define        PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define USTORM_ETH_PF_STAT_SIZE                                (IRO[24].size)
+
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
        (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define        PSTORM_QUEUE_STAT_SIZE                          (IRO[25].size)
-#define        PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define PSTORM_QUEUE_STAT_SIZE                         (IRO[25].size)
+
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
        (IRO[26].base + ((pf_id) * IRO[26].m1))
-#define        PSTORM_ETH_PF_STAT_SIZE                         (IRO[26].size)
-#define        PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
-       (IRO[27].base + ((ethtype) * IRO[27].m1))
-#define        PSTORM_CTL_FRAME_ETHTYPE_SIZE                   (IRO[27].size)
-#define        TSTORM_ETH_PRS_INPUT_OFFSET                     (IRO[28].base)
-#define        TSTORM_ETH_PRS_INPUT_SIZE                       (IRO[28].size)
-#define        ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+#define PSTORM_ETH_PF_STAT_SIZE                                (IRO[26].size)
+
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
+       (IRO[27].base + ((eth_type_id) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE                  (IRO[27].size)
+
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET                    (IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE                      (IRO[28].size)
+
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
        (IRO[29].base + ((pf_id) * IRO[29].m1))
-#define        ETH_RX_RATE_LIMIT_SIZE                          (IRO[29].size)
-#define        XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+#define ETH_RX_RATE_LIMIT_SIZE                         (IRO[29].size)
+
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
        (IRO[30].base + ((queue_id) * IRO[30].m1))
-#define        XSTORM_ETH_QUEUE_ZONE_SIZE                      (IRO[30].size)
+#define XSTORM_ETH_QUEUE_ZONE_SIZE                     (IRO[30].size)
+
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+       (IRO[31].base + ((rss_id) * IRO[31].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE                                (IRO[31].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+       (IRO[32].base + ((rss_id) * IRO[32].m1))
+#define USTORM_TOE_CQ_PROD_SIZE                                (IRO[32].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+       (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE                       (IRO[33].size)
+
+/* Tstorm cmdq-cons of given command queue-id */
 #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
-       (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE                             (IRO[34].size)
+       (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE                     (IRO[34].size)
+
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id.
+ */
 #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
-       (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE                          (IRO[35].size)
+       (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE                  (IRO[35].size)
+
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
 #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
-       (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE                          (IRO[36].size)
+       (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE                  (IRO[36].size)
+
+/* Tstorm iSCSI RX stats */
 #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
-       (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE                             (IRO[37].size)
+       (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE                     (IRO[37].size)
+
+/* Mstorm iSCSI RX stats */
 #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
-       (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE                             (IRO[38].size)
+       (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE                     (IRO[38].size)
+
+/* Ustorm iSCSI RX stats */
 #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
-       (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE                             (IRO[39].size)
+       (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE                     (IRO[39].size)
+
+/* Xstorm iSCSI TX stats */
 #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
-       (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE                             (IRO[40].size)
+       (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE                     (IRO[40].size)
+
+/* Ystorm iSCSI TX stats */
 #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
-       (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE                             (IRO[41].size)
+       (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE                     (IRO[41].size)
+
+/* Pstorm iSCSI TX stats */
 #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
-       (IRO[42].base + ((pf_id) * IRO[42].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE                             (IRO[42].size)
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
-       (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE                            (IRO[45].size)
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
-       (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE                            (IRO[46].size)
+       (IRO[42].base + ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE                     (IRO[42].size)
+
+/* Tstorm FCoE RX stats */
 #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
-       (IRO[43].base + ((pf_id) * IRO[43].m1))
+       (IRO[43].base + ((pf_id) * IRO[43].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE                      (IRO[43].size)
+
+/* Pstorm FCoE TX stats */
 #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
        (IRO[44].base + ((pf_id) * IRO[44].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE                      (IRO[44].size)
+
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+       (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE                    (IRO[45].size)
 
-static const struct iro iro_arr[49] = {
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+       (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE                    (IRO[46].size)
+
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+       (IRO[47].base + ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE                  (IRO[47].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+       (IRO[48].base + ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE                   (IRO[48].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+       (IRO[49].base + ((roce_pf_id) * IRO[49].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE          (IRO[49].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+       (IRO[50].base + ((roce_pf_id) * IRO[50].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE              (IRO[50].size)
+
+static const struct iro iro_arr[51] = {
        {0x0, 0x0, 0x0, 0x0, 0x8},
-       {0x4cb0, 0x80, 0x0, 0x0, 0x80},
-       {0x6518, 0x20, 0x0, 0x0, 0x20},
+       {0x4cb8, 0x88, 0x0, 0x0, 0x88},
+       {0x6530, 0x20, 0x0, 0x0, 0x20},
        {0xb00, 0x8, 0x0, 0x0, 0x4},
        {0xa80, 0x8, 0x0, 0x0, 0x4},
        {0x0, 0x8, 0x0, 0x0, 0x2},
        {0x80, 0x8, 0x0, 0x0, 0x4},
        {0x84, 0x8, 0x0, 0x0, 0x2},
+       {0x4c48, 0x0, 0x0, 0x0, 0x78},
+       {0x3e18, 0x0, 0x0, 0x0, 0x78},
+       {0x2b58, 0x0, 0x0, 0x0, 0x78},
        {0x4c40, 0x0, 0x0, 0x0, 0x78},
-       {0x3df0, 0x0, 0x0, 0x0, 0x78},
-       {0x29b0, 0x0, 0x0, 0x0, 0x78},
-       {0x4c38, 0x0, 0x0, 0x0, 0x78},
-       {0x4990, 0x0, 0x0, 0x0, 0x78},
-       {0x7f48, 0x0, 0x0, 0x0, 0x78},
+       {0x4998, 0x0, 0x0, 0x0, 0x78},
+       {0x7f50, 0x0, 0x0, 0x0, 0x78},
        {0xa28, 0x8, 0x0, 0x0, 0x8},
-       {0x61f8, 0x10, 0x0, 0x0, 0x10},
-       {0xbd20, 0x30, 0x0, 0x0, 0x30},
-       {0x95b8, 0x30, 0x0, 0x0, 0x30},
-       {0x4b60, 0x80, 0x0, 0x0, 0x40},
+       {0x6210, 0x10, 0x0, 0x0, 0x10},
+       {0xb820, 0x30, 0x0, 0x0, 0x30},
+       {0x96c0, 0x30, 0x0, 0x0, 0x30},
+       {0x4b68, 0x80, 0x0, 0x0, 0x40},
        {0x1f8, 0x4, 0x0, 0x0, 0x4},
-       {0x53a0, 0x80, 0x4, 0x0, 0x4},
-       {0xc7c8, 0x0, 0x0, 0x0, 0x4},
-       {0x4ba0, 0x80, 0x0, 0x0, 0x20},
-       {0x8150, 0x40, 0x0, 0x0, 0x30},
-       {0xec70, 0x60, 0x0, 0x0, 0x60},
-       {0x2b48, 0x80, 0x0, 0x0, 0x38},
-       {0xf1b0, 0x78, 0x0, 0x0, 0x78},
+       {0x53a8, 0x80, 0x4, 0x0, 0x4},
+       {0xc7d0, 0x0, 0x0, 0x0, 0x4},
+       {0x4ba8, 0x80, 0x0, 0x0, 0x20},
+       {0x8158, 0x40, 0x0, 0x0, 0x30},
+       {0xe770, 0x60, 0x0, 0x0, 0x60},
+       {0x2cf0, 0x80, 0x0, 0x0, 0x38},
+       {0xf2b8, 0x78, 0x0, 0x0, 0x78},
        {0x1f8, 0x4, 0x0, 0x0, 0x4},
-       {0xaef8, 0x0, 0x0, 0x0, 0xf0},
-       {0xafe8, 0x8, 0x0, 0x0, 0x8},
+       {0xaf20, 0x0, 0x0, 0x0, 0xf0},
+       {0xb010, 0x8, 0x0, 0x0, 0x8},
        {0x1f8, 0x8, 0x0, 0x0, 0x8},
        {0xac0, 0x8, 0x0, 0x0, 0x8},
        {0x2578, 0x8, 0x0, 0x0, 0x8},
        {0x24f8, 0x8, 0x0, 0x0, 0x8},
        {0x0, 0x8, 0x0, 0x0, 0x8},
-       {0x200, 0x10, 0x8, 0x0, 0x8},
-       {0xb78, 0x10, 0x8, 0x0, 0x2},
-       {0xd9a8, 0x38, 0x0, 0x0, 0x24},
-       {0x12988, 0x10, 0x0, 0x0, 0x8},
-       {0x11fa0, 0x38, 0x0, 0x0, 0x18},
-       {0xa580, 0x38, 0x0, 0x0, 0x10},
-       {0x86f8, 0x30, 0x0, 0x0, 0x18},
-       {0x101f8, 0x10, 0x0, 0x0, 0x10},
-       {0xde28, 0x48, 0x0, 0x0, 0x38},
-       {0x10660, 0x20, 0x0, 0x0, 0x20},
-       {0x2b80, 0x80, 0x0, 0x0, 0x10},
-       {0x5020, 0x10, 0x0, 0x0, 0x10},
-       {0xc9b0, 0x30, 0x0, 0x0, 0x10},
-       {0xeec0, 0x10, 0x0, 0x0, 0x10},
+       {0x400, 0x18, 0x8, 0x0, 0x8},
+       {0xb78, 0x18, 0x8, 0x0, 0x2},
+       {0xd898, 0x50, 0x0, 0x0, 0x3c},
+       {0x12908, 0x18, 0x0, 0x0, 0x10},
+       {0x11aa8, 0x40, 0x0, 0x0, 0x18},
+       {0xa588, 0x50, 0x0, 0x0, 0x20},
+       {0x8700, 0x40, 0x0, 0x0, 0x28},
+       {0x10300, 0x18, 0x0, 0x0, 0x10},
+       {0xde48, 0x48, 0x0, 0x0, 0x38},
+       {0x10768, 0x20, 0x0, 0x0, 0x20},
+       {0x2d28, 0x80, 0x0, 0x0, 0x10},
+       {0x5048, 0x10, 0x0, 0x0, 0x10},
+       {0xc9b8, 0x30, 0x0, 0x0, 0x10},
+       {0xeee0, 0x10, 0x0, 0x0, 0x10},
+       {0xa3a0, 0x10, 0x0, 0x0, 0x10},
+       {0x13108, 0x8, 0x0, 0x0, 0x8},
 };
 
 /* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET       0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET       1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET       2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET       3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET       4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET       5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET       6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET       7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET       8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET       9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET       10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET       11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET       12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET       13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET       14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET       15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET      17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET     18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET     19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET      20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET      21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET   22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET  23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET    24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET        761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE  736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET        761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE  736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET       1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET    2233
-#define CAU_REG_PI_MEMORY_RT_SIZE      4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET   6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET     6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET     6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET        6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET        6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET   6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET  6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET  6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET  6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET  6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET      6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET    6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET  6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET     6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET      6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET        6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET    6665
-#define SRC_REG_FIRSTFREE_RT_SIZE      2
-#define SRC_REG_LASTFREE_RT_OFFSET     6667
-#define SRC_REG_LASTFREE_RT_SIZE       2
-#define SRC_REG_COUNTFREE_RT_OFFSET    6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET     6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET       6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET       6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET        6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET       6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET      6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET       6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET      6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET       6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET     6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET      6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET    6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET     6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET    6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET     6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET    6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET     6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET    6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET  6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET  6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET      6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET    6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET    6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET  6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET        6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET        6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET   6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET       6699
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET     6700
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET     6701
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET        6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE  22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET  28702
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET       28703
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET  28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET  28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET     28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET     28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET     28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET        28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET        28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET        28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET    28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET    28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET       28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET       29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET   29738
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET   29739
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET   29740
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET      29741
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET      29742
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET      29743
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET      29744
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET      29745
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET      29746
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET      29747
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET      29748
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET      29749
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET      29750
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET     29751
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET     29752
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET     29753
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET     29754
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET     29755
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET     29756
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET     29757
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET     29758
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET     29759
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET     29760
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET     29761
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET     29762
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET     29763
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET     29764
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET     29765
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET     29766
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET     29767
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET     29768
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET     29769
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET     29770
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET     29771
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET     29772
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET     29773
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET     29774
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET     29775
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET     29776
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET     29777
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET     29778
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET     29779
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET     29780
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET     29781
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET     29782
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET     29783
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET     29784
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET     29785
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET     29786
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET     29787
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET     29788
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET     29789
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET     29790
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET     29791
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET     29792
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET     29793
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET     29794
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET     29795
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET     29796
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET     29797
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET     29798
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET     29799
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET     29800
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET     29801
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET     29802
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET     29803
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET     29804
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET       29805
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET    29933
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET    29934
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET     29935
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET   29936
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET  29937
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET       29938
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET       29939
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET       29940
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET       29941
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET       29942
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET       29943
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET       29944
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET       29945
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET       29946
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET       29947
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET      29948
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET      29949
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET      29950
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET      29951
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET      29952
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET      29953
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET   29954
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET   29955
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET   29956
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET   29957
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET      29958
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET      29959
-#define QM_REG_PQTX2PF_0_RT_OFFSET     29960
-#define QM_REG_PQTX2PF_1_RT_OFFSET     29961
-#define QM_REG_PQTX2PF_2_RT_OFFSET     29962
-#define QM_REG_PQTX2PF_3_RT_OFFSET     29963
-#define QM_REG_PQTX2PF_4_RT_OFFSET     29964
-#define QM_REG_PQTX2PF_5_RT_OFFSET     29965
-#define QM_REG_PQTX2PF_6_RT_OFFSET     29966
-#define QM_REG_PQTX2PF_7_RT_OFFSET     29967
-#define QM_REG_PQTX2PF_8_RT_OFFSET     29968
-#define QM_REG_PQTX2PF_9_RT_OFFSET     29969
-#define QM_REG_PQTX2PF_10_RT_OFFSET    29970
-#define QM_REG_PQTX2PF_11_RT_OFFSET    29971
-#define QM_REG_PQTX2PF_12_RT_OFFSET    29972
-#define QM_REG_PQTX2PF_13_RT_OFFSET    29973
-#define QM_REG_PQTX2PF_14_RT_OFFSET    29974
-#define QM_REG_PQTX2PF_15_RT_OFFSET    29975
-#define QM_REG_PQTX2PF_16_RT_OFFSET    29976
-#define QM_REG_PQTX2PF_17_RT_OFFSET    29977
-#define QM_REG_PQTX2PF_18_RT_OFFSET    29978
-#define QM_REG_PQTX2PF_19_RT_OFFSET    29979
-#define QM_REG_PQTX2PF_20_RT_OFFSET    29980
-#define QM_REG_PQTX2PF_21_RT_OFFSET    29981
-#define QM_REG_PQTX2PF_22_RT_OFFSET    29982
-#define QM_REG_PQTX2PF_23_RT_OFFSET    29983
-#define QM_REG_PQTX2PF_24_RT_OFFSET    29984
-#define QM_REG_PQTX2PF_25_RT_OFFSET    29985
-#define QM_REG_PQTX2PF_26_RT_OFFSET    29986
-#define QM_REG_PQTX2PF_27_RT_OFFSET    29987
-#define QM_REG_PQTX2PF_28_RT_OFFSET    29988
-#define QM_REG_PQTX2PF_29_RT_OFFSET    29989
-#define QM_REG_PQTX2PF_30_RT_OFFSET    29990
-#define QM_REG_PQTX2PF_31_RT_OFFSET    29991
-#define QM_REG_PQTX2PF_32_RT_OFFSET    29992
-#define QM_REG_PQTX2PF_33_RT_OFFSET    29993
-#define QM_REG_PQTX2PF_34_RT_OFFSET    29994
-#define QM_REG_PQTX2PF_35_RT_OFFSET    29995
-#define QM_REG_PQTX2PF_36_RT_OFFSET    29996
-#define QM_REG_PQTX2PF_37_RT_OFFSET    29997
-#define QM_REG_PQTX2PF_38_RT_OFFSET    29998
-#define QM_REG_PQTX2PF_39_RT_OFFSET    29999
-#define QM_REG_PQTX2PF_40_RT_OFFSET    30000
-#define QM_REG_PQTX2PF_41_RT_OFFSET    30001
-#define QM_REG_PQTX2PF_42_RT_OFFSET    30002
-#define QM_REG_PQTX2PF_43_RT_OFFSET    30003
-#define QM_REG_PQTX2PF_44_RT_OFFSET    30004
-#define QM_REG_PQTX2PF_45_RT_OFFSET    30005
-#define QM_REG_PQTX2PF_46_RT_OFFSET    30006
-#define QM_REG_PQTX2PF_47_RT_OFFSET    30007
-#define QM_REG_PQTX2PF_48_RT_OFFSET    30008
-#define QM_REG_PQTX2PF_49_RT_OFFSET    30009
-#define QM_REG_PQTX2PF_50_RT_OFFSET    30010
-#define QM_REG_PQTX2PF_51_RT_OFFSET    30011
-#define QM_REG_PQTX2PF_52_RT_OFFSET    30012
-#define QM_REG_PQTX2PF_53_RT_OFFSET    30013
-#define QM_REG_PQTX2PF_54_RT_OFFSET    30014
-#define QM_REG_PQTX2PF_55_RT_OFFSET    30015
-#define QM_REG_PQTX2PF_56_RT_OFFSET    30016
-#define QM_REG_PQTX2PF_57_RT_OFFSET    30017
-#define QM_REG_PQTX2PF_58_RT_OFFSET    30018
-#define QM_REG_PQTX2PF_59_RT_OFFSET    30019
-#define QM_REG_PQTX2PF_60_RT_OFFSET    30020
-#define QM_REG_PQTX2PF_61_RT_OFFSET    30021
-#define QM_REG_PQTX2PF_62_RT_OFFSET    30022
-#define QM_REG_PQTX2PF_63_RT_OFFSET    30023
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET  30024
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET  30025
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET  30026
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET  30027
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET  30028
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET  30029
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET  30030
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET  30031
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET  30032
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET  30033
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET        30040
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET        30041
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET   30042
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET   30043
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET     30044
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET     30045
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET     30046
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET     30047
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET     30048
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET     30049
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET     30050
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET     30051
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET  30052
-#define QM_REG_RLGLBLINCVAL_RT_SIZE    256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET      30308
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE        256
-#define QM_REG_RLGLBLCRD_RT_OFFSET     30564
-#define QM_REG_RLGLBLCRD_RT_SIZE       256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET  30820
-#define QM_REG_RLPFPERIOD_RT_OFFSET    30821
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET       30822
-#define QM_REG_RLPFINCVAL_RT_OFFSET    30823
-#define QM_REG_RLPFINCVAL_RT_SIZE      16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET        30839
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE  16
-#define QM_REG_RLPFCRD_RT_OFFSET       30855
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET    30871
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET   30873
-#define QM_REG_WFQPFWEIGHT_RT_SIZE     16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET       30889
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET      30905
-#define QM_REG_WFQPFCRD_RT_SIZE        256
-#define QM_REG_WFQPFENABLE_RT_OFFSET   31161
-#define QM_REG_WFQVPENABLE_RT_OFFSET   31162
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET  31163
-#define QM_REG_BASEADDRTXPQ_RT_SIZE    512
-#define QM_REG_TXPQMAP_RT_OFFSET       31675
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET   32187
-#define QM_REG_WFQVPWEIGHT_RT_SIZE     512
-#define QM_REG_WFQVPCRD_RT_OFFSET      32699
-#define QM_REG_WFQVPCRD_RT_SIZE        512
-#define QM_REG_WFQVPMAP_RT_OFFSET      33211
-#define QM_REG_WFQVPMAP_RT_SIZE        512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET  33723
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE    320
-#define QM_REG_VOQCRDLINE_RT_OFFSET    34043
-#define QM_REG_VOQCRDLINE_RT_SIZE      36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET        34079
-#define QM_REG_VOQINITCRDLINE_RT_SIZE  36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET      34115
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET        34116
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET        34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET        34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET        34119
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET     34121
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET      34122
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE        4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE   4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET   34130
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE     4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET      34134
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET        34135
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE  32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET   34167
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE     16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE   16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET        34199
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE  16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET      34215
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE        16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET       34232
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET      34233
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET      34234
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET      34235
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET  34236
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET  34237
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET  34238
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET  34239
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET       34240
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET       34241
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET       34242
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET       34243
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET   34244
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET        34245
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET      34246
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET       34248
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET  34249
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET   34250
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET       34251
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET  34252
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET   34253
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET       34254
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET  34255
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET   34256
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET       34257
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET  34258
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET   34259
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET       34260
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET  34261
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET   34262
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET       34263
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET  34264
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET   34265
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET       34266
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET  34267
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET   34268
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET       34269
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET  34270
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET   34271
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET       34272
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET  34273
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET   34274
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET       34275
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET  34276
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET   34277
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET      34278
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET  34280
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET      34281
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET  34283
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET      34284
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET  34286
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET      34287
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET  34289
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET      34290
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET  34292
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET      34293
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET  34295
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET      34296
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET  34298
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET      34299
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET  34301
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET      34302
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET  34304
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET      34305
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET  34307
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET   34308
-
-#define RUNTIME_ARRAY_SIZE 34309
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET                       0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET                       1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET                       2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET                       3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET                       4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET                       5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET                       6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET                       7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET                       8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET                       9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET                       10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET                       11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET                       12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET                       13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET                       14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET                       15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET                         16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET                      17
+#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET                      18
+#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET                      19
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET               20
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET               21
+#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET                   22
+#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET                   23
+#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET                   24
+#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET                   25
+#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET                   26
+#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET                   27
+#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET                   28
+#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET                   29
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET            30
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET            31
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET            32
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET            33
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET            34
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET            35
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET            36
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET            37
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET                     38
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET                     39
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET                      40
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET                      41
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET                   42
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET                  43
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET                    44
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET                                45
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE                          1024
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET                       1069
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE                         1024
+#define CAU_REG_PI_MEMORY_RT_OFFSET                            2093
+#define CAU_REG_PI_MEMORY_RT_SIZE                              4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET           6509
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET             6510
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET             6511
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET                        6512
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET                        6513
+#define PRS_REG_SEARCH_TCP_RT_OFFSET                           6514
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET                          6515
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET                          6516
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET                  6517
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET                  6518
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET                      6519
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET            6520
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET  6521
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET             6522
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET                      6523
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET                        6524
+#define SRC_REG_FIRSTFREE_RT_OFFSET                            6525
+#define SRC_REG_FIRSTFREE_RT_SIZE                              2
+#define SRC_REG_LASTFREE_RT_OFFSET                             6527
+#define SRC_REG_LASTFREE_RT_SIZE                               2
+#define SRC_REG_COUNTFREE_RT_OFFSET                            6529
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET                     6530
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET                       6531
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET                       6532
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET                         6533
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET                         6534
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET                                6535
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET                       6536
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET                      6537
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET                       6538
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET                      6539
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET                       6540
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET                     6541
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET                      6542
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET                    6543
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET                     6544
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET                    6545
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET                     6546
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET                    6547
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET                     6548
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET            6549
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET          6550
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET          6551
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET                      6552
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET                    6553
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET                    6554
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET                  6555
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET                        6556
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET                        6557
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET                           6558
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET                       6559
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET                     6560
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET                     6561
+#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET                   6562
+#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET                   6563
+#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET                    6564
+#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET                    6565
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET                                6566
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE                          26414
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET                          32980
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET               32981
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET                  32982
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET                  32983
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET                     32984
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET                     32985
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET                     32986
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET                                32987
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET                                32988
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET                                32989
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET            32990
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET            32991
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET                       32992
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE                         416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET                       33408
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE                         608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET                           34016
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET                           34017
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET                           34018
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET                      34019
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET                      34020
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET                      34021
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET                      34022
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET                      34023
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET                      34024
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET                      34025
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET                      34026
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET                      34027
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET                      34028
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET                     34029
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET                     34030
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET                     34031
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET                     34032
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET                     34033
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET                     34034
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET                     34035
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET                     34036
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET                     34037
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET                     34038
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET                     34039
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET                     34040
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET                     34041
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET                     34042
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET                     34043
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET                     34044
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET                     34045
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET                     34046
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET                     34047
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET                     34048
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET                     34049
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET                     34050
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET                     34051
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET                     34052
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET                     34053
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET                     34054
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET                     34055
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET                     34056
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET                     34057
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET                     34058
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET                     34059
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET                     34060
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET                     34061
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET                     34062
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET                     34063
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET                     34064
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET                     34065
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET                     34066
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET                     34067
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET                     34068
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET                     34069
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET                     34070
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET                     34071
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET                     34072
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET                     34073
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET                     34074
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET                     34075
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET                     34076
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET                     34077
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET                     34078
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET                     34079
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET                     34080
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET                     34081
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET                     34082
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET                       34083
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE                         128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET                           34211
+#define QM_REG_PTRTBLOTHER_RT_SIZE                             256
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET                    34467
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET                    34468
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET                     34469
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET                   34470
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET                  34471
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET                       34472
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET                       34473
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET                       34474
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET                       34475
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET                       34476
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET                       34477
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET                       34478
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET                       34479
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET                       34480
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET                       34481
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET                      34482
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET                      34483
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET                      34484
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET                      34485
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET                      34486
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET                      34487
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET                   34488
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET                   34489
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET                   34490
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET                   34491
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET                      34492
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET                      34493
+#define QM_REG_PQTX2PF_0_RT_OFFSET                             34494
+#define QM_REG_PQTX2PF_1_RT_OFFSET                             34495
+#define QM_REG_PQTX2PF_2_RT_OFFSET                             34496
+#define QM_REG_PQTX2PF_3_RT_OFFSET                             34497
+#define QM_REG_PQTX2PF_4_RT_OFFSET                             34498
+#define QM_REG_PQTX2PF_5_RT_OFFSET                             34499
+#define QM_REG_PQTX2PF_6_RT_OFFSET                             34500
+#define QM_REG_PQTX2PF_7_RT_OFFSET                             34501
+#define QM_REG_PQTX2PF_8_RT_OFFSET                             34502
+#define QM_REG_PQTX2PF_9_RT_OFFSET                             34503
+#define QM_REG_PQTX2PF_10_RT_OFFSET                            34504
+#define QM_REG_PQTX2PF_11_RT_OFFSET                            34505
+#define QM_REG_PQTX2PF_12_RT_OFFSET                            34506
+#define QM_REG_PQTX2PF_13_RT_OFFSET                            34507
+#define QM_REG_PQTX2PF_14_RT_OFFSET                            34508
+#define QM_REG_PQTX2PF_15_RT_OFFSET                            34509
+#define QM_REG_PQTX2PF_16_RT_OFFSET                            34510
+#define QM_REG_PQTX2PF_17_RT_OFFSET                            34511
+#define QM_REG_PQTX2PF_18_RT_OFFSET                            34512
+#define QM_REG_PQTX2PF_19_RT_OFFSET                            34513
+#define QM_REG_PQTX2PF_20_RT_OFFSET                            34514
+#define QM_REG_PQTX2PF_21_RT_OFFSET                            34515
+#define QM_REG_PQTX2PF_22_RT_OFFSET                            34516
+#define QM_REG_PQTX2PF_23_RT_OFFSET                            34517
+#define QM_REG_PQTX2PF_24_RT_OFFSET                            34518
+#define QM_REG_PQTX2PF_25_RT_OFFSET                            34519
+#define QM_REG_PQTX2PF_26_RT_OFFSET                            34520
+#define QM_REG_PQTX2PF_27_RT_OFFSET                            34521
+#define QM_REG_PQTX2PF_28_RT_OFFSET                            34522
+#define QM_REG_PQTX2PF_29_RT_OFFSET                            34523
+#define QM_REG_PQTX2PF_30_RT_OFFSET                            34524
+#define QM_REG_PQTX2PF_31_RT_OFFSET                            34525
+#define QM_REG_PQTX2PF_32_RT_OFFSET                            34526
+#define QM_REG_PQTX2PF_33_RT_OFFSET                            34527
+#define QM_REG_PQTX2PF_34_RT_OFFSET                            34528
+#define QM_REG_PQTX2PF_35_RT_OFFSET                            34529
+#define QM_REG_PQTX2PF_36_RT_OFFSET                            34530
+#define QM_REG_PQTX2PF_37_RT_OFFSET                            34531
+#define QM_REG_PQTX2PF_38_RT_OFFSET                            34532
+#define QM_REG_PQTX2PF_39_RT_OFFSET                            34533
+#define QM_REG_PQTX2PF_40_RT_OFFSET                            34534
+#define QM_REG_PQTX2PF_41_RT_OFFSET                            34535
+#define QM_REG_PQTX2PF_42_RT_OFFSET                            34536
+#define QM_REG_PQTX2PF_43_RT_OFFSET                            34537
+#define QM_REG_PQTX2PF_44_RT_OFFSET                            34538
+#define QM_REG_PQTX2PF_45_RT_OFFSET                            34539
+#define QM_REG_PQTX2PF_46_RT_OFFSET                            34540
+#define QM_REG_PQTX2PF_47_RT_OFFSET                            34541
+#define QM_REG_PQTX2PF_48_RT_OFFSET                            34542
+#define QM_REG_PQTX2PF_49_RT_OFFSET                            34543
+#define QM_REG_PQTX2PF_50_RT_OFFSET                            34544
+#define QM_REG_PQTX2PF_51_RT_OFFSET                            34545
+#define QM_REG_PQTX2PF_52_RT_OFFSET                            34546
+#define QM_REG_PQTX2PF_53_RT_OFFSET                            34547
+#define QM_REG_PQTX2PF_54_RT_OFFSET                            34548
+#define QM_REG_PQTX2PF_55_RT_OFFSET                            34549
+#define QM_REG_PQTX2PF_56_RT_OFFSET                            34550
+#define QM_REG_PQTX2PF_57_RT_OFFSET                            34551
+#define QM_REG_PQTX2PF_58_RT_OFFSET                            34552
+#define QM_REG_PQTX2PF_59_RT_OFFSET                            34553
+#define QM_REG_PQTX2PF_60_RT_OFFSET                            34554
+#define QM_REG_PQTX2PF_61_RT_OFFSET                            34555
+#define QM_REG_PQTX2PF_62_RT_OFFSET                            34556
+#define QM_REG_PQTX2PF_63_RT_OFFSET                            34557
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET                          34558
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET                          34559
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET                          34560
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET                          34561
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET                          34562
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET                          34563
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET                          34564
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET                          34565
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET                          34566
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET                          34567
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET                         34568
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET                         34569
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET                         34570
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET                         34571
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET                         34572
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET                         34573
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET                                34574
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET                                34575
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET                   34576
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET                   34577
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET                     34578
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET                     34579
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET                     34580
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET                     34581
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET                     34582
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET                     34583
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET                     34584
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET                     34585
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET                          34586
+#define QM_REG_RLGLBLINCVAL_RT_SIZE                            256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET                      34842
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE                                256
+#define QM_REG_RLGLBLCRD_RT_OFFSET                             35098
+#define QM_REG_RLGLBLCRD_RT_SIZE                               256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET                          35354
+#define QM_REG_RLPFPERIOD_RT_OFFSET                            35355
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET                       35356
+#define QM_REG_RLPFINCVAL_RT_OFFSET                            35357
+#define QM_REG_RLPFINCVAL_RT_SIZE                              16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET                                35373
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE                          16
+#define QM_REG_RLPFCRD_RT_OFFSET                               35389
+#define QM_REG_RLPFCRD_RT_SIZE                                 16
+#define QM_REG_RLPFENABLE_RT_OFFSET                            35405
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET                         35406
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET                           35407
+#define QM_REG_WFQPFWEIGHT_RT_SIZE                             16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET                       35423
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE                         16
+#define QM_REG_WFQPFCRD_RT_OFFSET                              35439
+#define QM_REG_WFQPFCRD_RT_SIZE                                        256
+#define QM_REG_WFQPFENABLE_RT_OFFSET                           35695
+#define QM_REG_WFQVPENABLE_RT_OFFSET                           35696
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET                          35697
+#define QM_REG_BASEADDRTXPQ_RT_SIZE                            512
+#define QM_REG_TXPQMAP_RT_OFFSET                               36209
+#define QM_REG_TXPQMAP_RT_SIZE                                 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET                           36721
+#define QM_REG_WFQVPWEIGHT_RT_SIZE                             512
+#define QM_REG_WFQVPCRD_RT_OFFSET                              37233
+#define QM_REG_WFQVPCRD_RT_SIZE                                        512
+#define QM_REG_WFQVPMAP_RT_OFFSET                              37745
+#define QM_REG_WFQVPMAP_RT_SIZE                                        512
+#define QM_REG_PTRTBLTX_RT_OFFSET                              38257
+#define QM_REG_PTRTBLTX_RT_SIZE                                        1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET                          39281
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE                            320
+#define QM_REG_VOQCRDLINE_RT_OFFSET                            39601
+#define QM_REG_VOQCRDLINE_RT_SIZE                              36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET                                39637
+#define QM_REG_VOQINITCRDLINE_RT_SIZE                          36
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET                     39673
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET                      39674
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET                 39675
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET                        39676
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET                        39677
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET                        39678
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET                        39679
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET             39680
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET                      39681
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE                                4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET                   39685
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE                     4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET                        39689
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE                  32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET                   39721
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE                     16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET                 39737
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE                   16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET                39753
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE          16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET              39769
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE                        16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET                         39785
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET               39786
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET                    39787
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE                      8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET         39795
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE           1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET            40819
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE              512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET          41331
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE            512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE   512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET       42355
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE         512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET               42867
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE                 32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET                      42899
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET                      42900
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET                      42901
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET                  42902
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET                  42903
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET                  42904
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET                  42905
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET               42906
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET               42907
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET               42908
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET               42909
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET                   42910
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET                        42911
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET                      42912
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET                 42913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET               42914
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET                  42915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET           42916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET               42917
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET                  42918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET           42919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET               42920
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET                  42921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET           42922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET               42923
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET                  42924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET           42925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET               42926
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET                  42927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET           42928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET               42929
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET                  42930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET           42931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET               42932
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET                  42933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET           42934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET               42935
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET                  42936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET           42937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET               42938
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET                  42939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET           42940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET               42941
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET                  42942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET           42943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET              42944
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET                 42945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET          42946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET              42947
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET                 42948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET          42949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET              42950
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET                 42951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET          42952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET              42953
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET                 42954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET          42955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET              42956
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET                 42957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET          42958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET              42959
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET                 42960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET          42961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET              42962
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET                 42963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET          42964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET              42965
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET                 42966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET          42967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET              42968
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET                 42969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET          42970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET              42971
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET                 42972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET          42973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET              42974
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET                 42975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET          42976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET              42977
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET                 42978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET          42979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET              42980
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET                 42981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET          42982
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET              42983
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET                 42984
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET          42985
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET              42986
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET                 42987
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET          42988
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET              42989
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET                 42990
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET          42991
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET              42992
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET                 42993
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET          42994
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET              42995
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET                 42996
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET          42997
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET              42998
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET                 42999
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET          43000
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET              43001
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET                 43002
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET          43003
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET              43004
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET                 43005
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET          43006
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET              43007
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET                 43008
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET          43009
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET              43010
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET                 43011
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET          43012
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET              43013
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET                 43014
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET          43015
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET              43016
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET                 43017
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET          43018
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET              43019
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET                 43020
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET          43021
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET                           43022
+
+#define RUNTIME_ARRAY_SIZE     43023
+
+/* Init Callbacks */
+#define DMAE_READY_CB  0
 
 /* The eth storm context for the Tstorm */
 struct tstorm_eth_conn_st_ctx {
@@ -4436,219 +4935,219 @@ struct xstorm_eth_conn_st_ctx {
        __le32 reserved[60];
 };
 
-struct xstorm_eth_conn_ag_ctx {
+struct e4_xstorm_eth_conn_ag_ctx {
        u8 reserved0;
-       u8 eth_state;
+       u8 state;
        u8 flags0;
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT      0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK          0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT         1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK          0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT         2
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT      3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK          0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT         4
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK          0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT         5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK          0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT         6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK          0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT         7
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT   0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK       0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT      1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK       0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT      2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT   3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK       0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT      4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK       0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT      5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK       0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT      6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK       0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT      7
                u8 flags1;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK          0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT         0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK          0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT         1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK          0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT         2
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT             3
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT             4
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT             5
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK     0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT    6
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT      7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK       0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT      0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK       0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT      1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK       0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT      2
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK           0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT          3
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT   4
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT   5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK  0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT   7
        u8 flags2;
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK                0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT               0
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK                0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT               2
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK                0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT               4
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK                0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT               6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK     0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT    0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK     0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT    2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK     0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT    4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK     0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT    6
        u8 flags3;
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK                0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT               0
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK                0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT               2
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK                0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT               4
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK                0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT               6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK     0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT    0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK     0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT    2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK     0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT    4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK     0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT    6
                u8 flags4;
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK                0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT               0
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK                0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT               2
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK               0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT              4
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK               0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT              6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK     0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT    0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK     0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT    2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK    0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT   4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK    0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT   6
        u8 flags5;
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK               0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT              0
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK               0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT              2
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK               0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT              4
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK               0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT              6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK    0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT   0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK    0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT   2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK    0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT   4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK    0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT   6
        u8 flags6;
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK   0x3
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT  0
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK   0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT  2
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK              0x3
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT             4
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK       0x3
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT      6
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK                0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK                0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK                   0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK            0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
        u8 flags7;
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK           0x3
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT          0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK         0x3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT                2
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK          0x3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT         4
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT             6
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT             7
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK                0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT       0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK      0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT     2
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK       0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT      4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK           0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT          6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK           0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT          7
        u8 flags8;
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT             0
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT             1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT             2
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT             3
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT             4
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT             5
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT             6
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK              0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT             7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK   0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT  0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK   0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT  1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK   0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT  2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK   0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT  3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK   0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT  4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK   0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT  5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK   0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT  6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK   0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT  7
        u8 flags9;
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK             0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT            0
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK             0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT            1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK             0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT            2
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK             0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT            3
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK             0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT            4
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK             0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT            5
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK        0x1
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK        0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK                  0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK                  0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK                  0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK                  0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK                  0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK                  0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
        u8 flags10;
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK           0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT          0
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK    0x1
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT   1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK                0x1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT       2
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK         0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT                3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT      4
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK         0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT                6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK         0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT                7
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK                        0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT                1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK              0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK              0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK              0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT             7
        u8 flags11;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK         0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT                0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK         0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT                1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK     0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT    2
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK            0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT           3
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK            0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT           4
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK            0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT           5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT      6
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK            0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT           7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK      0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT     0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK      0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT     1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK  0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK         0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK         0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK         0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT   6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK         0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT                7
        u8 flags12;
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK           0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT          0
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK           0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT          1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT      2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT      3
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK           0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT          4
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK           0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT          5
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK           0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT          6
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK           0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT          7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK                0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT       0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK                0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT       1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT   2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT   3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK                0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT       4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK                0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT       5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK                0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT       6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK                0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT       7
        u8 flags13;
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK           0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT          0
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK           0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT          1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT      2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT      3
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT      4
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT      5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT      6
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK       0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT      7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK                0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT       0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK                0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT       1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT   2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT   3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT   4
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT   5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT   6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT   7
        u8 flags14;
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK   0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT  0
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT        1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK     0x1
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT    4
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK   0x1
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT  5
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK         0x3
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT                6
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK                0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK                0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK              0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
        u8 edpm_event_id;
        __le16 physical_q0;
-       __le16 ereserved1;
+       __le16 e5_reserved1;
        __le16 edpm_num_bds;
        __le16 tx_bd_cons;
        __le16 tx_bd_prod;
@@ -4681,7 +5180,7 @@ struct xstorm_eth_conn_ag_ctx {
        u8 byte13;
        u8 byte14;
        u8 byte15;
-       u8 ereserved;
+       u8 e5_reserved;
        __le16 word11;
        __le32 reg10;
        __le32 reg11;
@@ -4704,37 +5203,37 @@ struct ystorm_eth_conn_st_ctx {
        __le32 reserved[8];
 };
 
-struct ystorm_eth_conn_ag_ctx {
+struct e4_ystorm_eth_conn_ag_ctx {
        u8 byte0;
        u8 state;
        u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK               0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT              0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK               0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT              1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK  0x3
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK   0x3
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT  4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK                        0x3
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT               6
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK                    0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                   0
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK                    0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                   1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK       0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT      2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK                0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT       4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    6
        u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK       0x1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT      0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK                0x1
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT       1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                      0x1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                     2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK                    0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT                   3
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK                    0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT                   4
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK                    0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT                   5
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK                    0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT                   6
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK                    0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT                   7
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK    0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT   0
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK     0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT    1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  2
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK                 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT                        3
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK                 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT                        4
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK                 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT                        5
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK                 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT                        6
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK                 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT                        7
        u8 tx_q0_int_coallecing_timeset;
        u8 byte3;
        __le16 word0;
@@ -4748,89 +5247,89 @@ struct ystorm_eth_conn_ag_ctx {
        __le32 reg3;
 };
 
-struct tstorm_eth_conn_ag_ctx {
+struct e4_tstorm_eth_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK               0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT              0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK               0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT              1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK               0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT              2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK               0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT              3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK               0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT              4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK               0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT              5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK                        0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT               6
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK    0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT   0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK    0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT   1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK    0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT   2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK    0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT   3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK    0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT   4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK    0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT   5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK     0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT    6
        u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK                        0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT               0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK                        0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT               2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK                        0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT               4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK                        0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT               6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK     0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT    0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK     0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT    2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK     0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT    4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK     0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT    6
        u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK                        0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT               0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK                        0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT               2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK                        0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT               4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK                        0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT               6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK     0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT    0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK     0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT    2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK     0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT    4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK     0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT    6
        u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK                        0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT               0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK               0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT              2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK              0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT             4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK              0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT             5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK              0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT             6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK              0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT             7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK     0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT    0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK    0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT   2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK   0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT  4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK   0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT  5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK   0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT  6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK   0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT  7
        u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK              0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT             0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK              0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT             1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK              0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT             2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK              0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT             3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK              0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT             4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK              0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT             5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK             0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT            6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK            0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT           7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK   0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT  0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK   0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT  1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK   0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT  2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK   0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT  3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK   0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT  4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK   0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT  5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK  0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT        7
        u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK            0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT           0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK            0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT           1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK            0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT           2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK            0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT           3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK            0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT           4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK           0x1
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT          5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK            0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT           6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK            0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT           7
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK         0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT                0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK         0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT                1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK         0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT                2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK         0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT                3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK         0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK                0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT       5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK         0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK         0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT                7
        __le32 reg0;
        __le32 reg1;
        __le32 reg2;
@@ -4852,63 +5351,63 @@ struct tstorm_eth_conn_ag_ctx {
        __le32 reg10;
 };
 
-struct ustorm_eth_conn_ag_ctx {
+struct e4_ustorm_eth_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK                       0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                      0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK                       0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                      1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK                0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT       2
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK                0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT       4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK                                0x3
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT                       6
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK                    0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                   0
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK                    0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                   1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK     0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT    2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK     0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT    4
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    6
        u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK                                0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT                       0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK                  0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT                 2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK                  0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT                 4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK          0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT         6
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    0
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK               0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT              2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK               0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT              4
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK       0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT      6
        u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK     0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT    0
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK     0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT    1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK                      0x1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                     2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK                      0x1
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                     3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK               0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT              4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK               0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT              5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK       0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT      6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK                    0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT                   7
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK  0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK  0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  2
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK            0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT           4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK            0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT           5
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK    0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT   6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK                 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT                        7
        u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK                    0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT                   0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK                    0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT                   1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK                    0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT                   2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK                    0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT                   3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                    0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                   4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                    0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                   5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                    0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                   6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK                    0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT                   7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT        0
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT        1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT        2
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT        3
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT        4
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT        5
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT        6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT        7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -4932,20 +5431,21 @@ struct mstorm_eth_conn_st_ctx {
 };
 
 /* eth connection context */
-struct eth_conn_context {
+struct e4_eth_conn_context {
        struct tstorm_eth_conn_st_ctx tstorm_st_context;
        struct regpair tstorm_st_padding[2];
        struct pstorm_eth_conn_st_ctx pstorm_st_context;
        struct xstorm_eth_conn_st_ctx xstorm_st_context;
-       struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+       struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
        struct ystorm_eth_conn_st_ctx ystorm_st_context;
-       struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
-       struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
-       struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+       struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
+       struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+       struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
        struct ustorm_eth_conn_st_ctx ustorm_st_context;
        struct mstorm_eth_conn_st_ctx mstorm_st_context;
 };
 
+/* Ethernet filter types: mac/vlan/pair */
 enum eth_error_code {
        ETH_OK = 0x00,
        ETH_FILTERS_MAC_ADD_FAIL_FULL,
@@ -4972,6 +5472,7 @@ enum eth_error_code {
        MAX_ETH_ERROR_CODE
 };
 
+/* Opcodes for the event ring */
 enum eth_event_opcode {
        ETH_EVENT_UNUSED,
        ETH_EVENT_VPORT_START,
@@ -4983,13 +5484,14 @@ enum eth_event_opcode {
        ETH_EVENT_RX_QUEUE_UPDATE,
        ETH_EVENT_RX_QUEUE_STOP,
        ETH_EVENT_FILTERS_UPDATE,
-       ETH_EVENT_RESERVED,
-       ETH_EVENT_RESERVED2,
-       ETH_EVENT_RESERVED3,
+       ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+       ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+       ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
        ETH_EVENT_RX_ADD_UDP_FILTER,
        ETH_EVENT_RX_DELETE_UDP_FILTER,
-       ETH_EVENT_RESERVED4,
-       ETH_EVENT_RESERVED5,
+       ETH_EVENT_RX_CREATE_GFT_ACTION,
+       ETH_EVENT_RX_GFT_UPDATE_FILTER,
+       ETH_EVENT_TX_QUEUE_UPDATE,
        MAX_ETH_EVENT_OPCODE
 };
 
@@ -5039,6 +5541,7 @@ enum eth_filter_type {
        MAX_ETH_FILTER_TYPE
 };
 
+/* Eth IPv4 Fragment Type */
 enum eth_ipv4_frag_type {
        ETH_IPV4_NOT_FRAG,
        ETH_IPV4_FIRST_FRAG,
@@ -5046,12 +5549,14 @@ enum eth_ipv4_frag_type {
        MAX_ETH_IPV4_FRAG_TYPE
 };
 
+/* eth IPv4 Fragment Type */
 enum eth_ip_type {
        ETH_IPV4,
        ETH_IPV6,
        MAX_ETH_IP_TYPE
 };
 
+/* Ethernet Ramrod Command IDs */
 enum eth_ramrod_cmd_id {
        ETH_RAMROD_UNUSED,
        ETH_RAMROD_VPORT_START,
@@ -5070,10 +5575,11 @@ enum eth_ramrod_cmd_id {
        ETH_RAMROD_RX_DELETE_UDP_FILTER,
        ETH_RAMROD_RX_CREATE_GFT_ACTION,
        ETH_RAMROD_GFT_UPDATE_FILTER,
+       ETH_RAMROD_TX_QUEUE_UPDATE,
        MAX_ETH_RAMROD_CMD_ID
 };
 
-/* return code from eth sp ramrods */
+/* Return code from eth sp ramrods */
 struct eth_return_code {
        u8 value;
 #define ETH_RETURN_CODE_ERR_CODE_MASK  0x1F
@@ -5209,18 +5715,14 @@ struct eth_vport_tx_mode {
        __le16 reserved2[3];
 };
 
+/* GFT filter update action type */
 enum gft_filter_update_action {
        GFT_ADD_FILTER,
        GFT_DELETE_FILTER,
        MAX_GFT_FILTER_UPDATE_ACTION
 };
 
-enum gft_logic_filter_type {
-       GFT_FILTER_TYPE,
-       RFS_FILTER_TYPE,
-       MAX_GFT_LOGIC_FILTER_TYPE
-};
-
+/* Ramrod data for rx add openflow filter */
 struct rx_add_openflow_filter_data {
        __le16 action_icid;
        u8 priority;
@@ -5244,11 +5746,13 @@ struct rx_add_openflow_filter_data {
        __le16 l4_src_port;
 };
 
+/* Ramrod data for rx create gft action */
 struct rx_create_gft_action_data {
        u8 vport_id;
        u8 reserved[7];
 };
 
+/* Ramrod data for rx create openflow action */
 struct rx_create_openflow_action_data {
        u8 vport_id;
        u8 reserved[7];
@@ -5286,7 +5790,7 @@ struct rx_queue_start_ramrod_data {
        struct regpair reserved2;
 };
 
-/* Ramrod data for rx queue start ramrod */
+/* Ramrod data for rx queue stop ramrod */
 struct rx_queue_stop_ramrod_data {
        __le16 rx_queue_id;
        u8 complete_cqe_flg;
@@ -5324,14 +5828,22 @@ struct rx_udp_filter_data {
        __le32 tenant_id;
 };
 
+/* Add or delete GFT filter - filter is packet header of type of packet wished
+ * to pass certain FW flow.
+ */
 struct rx_update_gft_filter_data {
        struct regpair pkt_hdr_addr;
        __le16 pkt_hdr_length;
-       __le16 rx_qid_or_action_icid;
-       u8 vport_id;
-       u8 filter_type;
+       __le16 action_icid;
+       __le16 rx_qid;
+       __le16 flow_id;
+       __le16 vport_id;
+       u8 action_icid_valid;
+       u8 rx_qid_valid;
+       u8 flow_id_valid;
        u8 filter_action;
        u8 assert_on_error;
+       u8 reserved;
 };
 
 /* Ramrod data for rx queue start ramrod */
@@ -5377,6 +5889,14 @@ struct tx_queue_stop_ramrod_data {
        __le16 reserved[4];
 };
 
+/* Ramrod data for tx queue update ramrod */
+struct tx_queue_update_ramrod_data {
+       __le16 update_qm_pq_id_flg;
+       __le16 qm_pq_id;
+       __le32 reserved0;
+       struct regpair reserved1[5];
+};
+
 /* Ramrod data for vport update ramrod */
 struct vport_filter_update_ramrod_data {
        struct eth_filter_cmd_header filter_cmd_hdr;
@@ -5477,219 +5997,219 @@ struct vport_update_ramrod_data {
        struct eth_vport_rss_config rss_config;
 };
 
-struct xstorm_eth_conn_agctxdq_ext_ldpart {
+struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
        u8 reserved0;
-       u8 eth_state;
+       u8 state;
        u8 flags0;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT           0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK   0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT              1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK   0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT              2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT           3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK   0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT              4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK   0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT              5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK   0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT              6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK   0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT              7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT     0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK         0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT                1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK         0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT                2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT     3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK         0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT                4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK         0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT                5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK         0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT                6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK         0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT                7
        u8 flags1;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK   0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT              0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK   0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT              1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK   0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT              2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT                  3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT                  4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT                  5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK      0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT         6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT           7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK         0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT                0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK         0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT                1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK         0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT                2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK             0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT            3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT     4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT     5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK    0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT   6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT     7
        u8 flags2;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT                    0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT                    2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT                    4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT                    6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK       0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT      0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK       0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT      2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK       0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT      4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK       0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT      6
        u8 flags3;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT                    0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT                    2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT                    4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT                    6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK       0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT      0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK       0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT      2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK       0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT      4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK       0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT      6
        u8 flags4;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT                    0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT                    2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK        0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT                   4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK        0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT                   6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK       0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT      0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK       0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT      2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK      0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT     4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK      0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT     6
        u8 flags5;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK        0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT                   0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK        0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT                   2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK        0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT                   4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK        0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT                   6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK      0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT     0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK      0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT     2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK      0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT     4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK      0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT     6
        u8 flags6;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK    0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT       0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK    0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT       2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK       0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT                  4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK        0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT           6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK  0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK  0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK             0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT            4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK      0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT     6
        u8 flags7;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK    0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK  0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT             2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK   0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT              4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT                  6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT                  7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK          0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT         0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK                0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT       2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK         0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT                4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK             0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT            6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK             0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT            7
        u8 flags8;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT                  0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT                  1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT                  2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT                  3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT                  4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT                  5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT                  6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK       0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT                  7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK     0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT    0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK     0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT    1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK     0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT    2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK     0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT    3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK     0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT    4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK     0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT    5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK     0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT    6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK     0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT    7
        u8 flags9;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK      0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK      0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK      0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT                 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK      0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK      0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK      0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT    6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT    7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK                    0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                   0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK                    0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                   1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK                    0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT                   2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK                    0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                   3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK                    0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                   4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK                    0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                   5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK       0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT      6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK       0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT      7
        u8 flags10;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK    0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK     0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT        1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT            2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK  0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT             3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT           4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK      0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK  0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT             6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK  0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT             7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK                  0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT                 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK           0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT          1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK               0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT              2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK                        0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT               3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK              0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT             4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK    0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT   5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK                        0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT               6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK                        0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT               7
        u8 flags11;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK  0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT             0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK  0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT             1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK      0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT         2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK     0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT                3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK     0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT                4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK     0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT                5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT           6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK     0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT                7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT       0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT       1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK    0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT   2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK           0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT          3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK           0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT          4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK           0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT          5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT     6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK           0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT          7
        u8 flags12;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK    0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK    0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT               1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT           2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT           3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK    0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT               4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK    0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT               5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK    0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT               6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK    0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT               7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT         0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT         1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT     2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT     3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT         4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT         5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT         6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT         7
        u8 flags13;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK    0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK    0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT               1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT           2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT           3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT           4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT           5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT           6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT           7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT         0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT         1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT     2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT     3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT     4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT     5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT     6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT     7
        u8 flags14;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK    0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT       0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK  0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT     1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT   2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK        0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT   3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK      0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT         4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK    0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT       5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK  0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT             6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT         0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK                0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT       1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT     2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK      0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT     3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK            0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT           4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK          0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT         5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK                        0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT               6
        u8 edpm_event_id;
        __le16 physical_q0;
-       __le16 ereserved1;
+       __le16 e5_reserved1;
        __le16 edpm_num_bds;
        __le16 tx_bd_cons;
        __le16 tx_bd_prod;
@@ -5706,256 +6226,256 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart {
        __le32 reg4;
 };
 
-struct mstorm_eth_conn_ag_ctx {
+struct e4_mstorm_eth_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK       0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK        0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK        0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK        0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK    0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT    0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK            0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT           1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK             0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT            2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK             0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT            4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK             0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT            6
        u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK      0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK      0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK      0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK    0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK    0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK    0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK    0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK    0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK   0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT  0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK   0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT  1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK   0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT  2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT        3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT        4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT        5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT        6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT        7
        __le16 word0;
        __le16 word1;
        __le32 reg0;
        __le32 reg1;
 };
 
-struct xstorm_eth_hw_conn_ag_ctx {
+struct e4_xstorm_eth_hw_conn_ag_ctx {
        u8 reserved0;
-       u8 eth_state;
+       u8 state;
        u8 flags0;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK       0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK       0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK       0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK       0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK       0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK       0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK    0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT   1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK    0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT   2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK    0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT   4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK    0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT   5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK    0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT   6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK    0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT   7
        u8 flags1;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK       0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK       0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK       0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT                  4
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT                  5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK  0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK            0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT           0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK            0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT           1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK            0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT           2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK                        0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT               3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK         0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT                4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK         0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT                5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT                7
        u8 flags2;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK  0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK  0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK  0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK  0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
        u8 flags3;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK  0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK  0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK  0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK  0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
        u8 flags4;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK     0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK    0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK    0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK  0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK  0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT        4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT        6
        u8 flags5;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK    0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK    0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK    0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK    0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT        0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT        2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT        4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT        6
        u8 flags6;
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK   0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK    0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK     0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT    0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK     0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT    2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK                        0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT               4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK         0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT                6
        u8 flags7;
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK        0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK      0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK       0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK     0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT    0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK   0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT  2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK    0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT   4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK                0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT       6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK                0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT       7
        u8 flags8;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK   0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK                0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT       0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK                0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT       1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK                0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT       2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK                0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT       3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK                0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT       4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK                0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT       5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK                0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT       6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK                0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT       7
        u8 flags9;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK  0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK  0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK  0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK  0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK  0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK  0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK               0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT              0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK               0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT              1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK               0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT              2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK               0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT              3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK               0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT              4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK               0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT              5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK  0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK  0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
        u8 flags10;
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK        0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK     0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK      0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK      0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK      0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK                     0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT                    0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK              0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT             1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK                  0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT                 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK                   0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT                  3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK                 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT                        4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK       0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT      5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK                   0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT                  6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK                   0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT                  7
        u8 flags11;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK      0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK      0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK  0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK           0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT          0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK           0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT          1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK              0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT             3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK              0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT             4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK              0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT             5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK         0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT                6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK              0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT             7
        u8 flags12;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK        0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK        0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK        0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK        0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK        0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK        0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK     0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT    0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK     0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT    1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK     0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT    4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK     0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT    5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK     0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT    6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK     0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT    7
        u8 flags13;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK        0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK        0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK     0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT    0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK     0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT    1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
        u8 flags14;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK  0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK      0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK     0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT    0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK   0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT  1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT        2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT        3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK       0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT      4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK     0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT    5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK           0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT          6
        u8 edpm_event_id;
        __le16 physical_q0;
-       __le16 ereserved1;
+       __le16 e5_reserved1;
        __le16 edpm_num_bds;
        __le16 tx_bd_cons;
        __le16 tx_bd_prod;
@@ -5963,6 +6483,7 @@ struct xstorm_eth_hw_conn_ag_ctx {
        __le16 conn_dpi;
 };
 
+/* GFT CAM line struct */
 struct gft_cam_line {
        __le32 camline;
 #define GFT_CAM_LINE_VALID_MASK                0x1
@@ -5975,6 +6496,7 @@ struct gft_cam_line {
 #define GFT_CAM_LINE_RESERVED1_SHIFT   29
 };
 
+/* GFT CAM line struct with fields breakout */
 struct gft_cam_line_mapped {
        __le32 camline;
 #define GFT_CAM_LINE_MAPPED_VALID_MASK                         0x1
@@ -6008,28 +6530,31 @@ union gft_cam_line_union {
        struct gft_cam_line_mapped cam_line_mapped;
 };
 
+/* Used in gft_profile_key: Indication for ip version */
 enum gft_profile_ip_version {
        GFT_PROFILE_IPV4 = 0,
        GFT_PROFILE_IPV6 = 1,
        MAX_GFT_PROFILE_IP_VERSION
 };
 
+/* Profile key struct for GFT logic in Prs */
 struct gft_profile_key {
        __le16 profile_key;
-#define GFT_PROFILE_KEY_IP_VERSION_MASK           0x1
-#define GFT_PROFILE_KEY_IP_VERSION_SHIFT          0
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK    0x1
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT   1
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK  0xF
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK          0xF
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT         6
-#define GFT_PROFILE_KEY_PF_ID_MASK                0xF
-#define GFT_PROFILE_KEY_PF_ID_SHIFT               10
-#define GFT_PROFILE_KEY_RESERVED0_MASK            0x3
-#define GFT_PROFILE_KEY_RESERVED0_SHIFT           14
-};
-
+#define GFT_PROFILE_KEY_IP_VERSION_MASK                        0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT               0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK         0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT                1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK       0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT      2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK               0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT              6
+#define GFT_PROFILE_KEY_PF_ID_MASK                     0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT                    10
+#define GFT_PROFILE_KEY_RESERVED0_MASK                 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT                        14
+};
+
+/* Used in gft_profile_key: Indication for tunnel type */
 enum gft_profile_tunnel_type {
        GFT_PROFILE_NO_TUNNEL = 0,
        GFT_PROFILE_VXLAN_TUNNEL = 1,
@@ -6040,6 +6565,7 @@ enum gft_profile_tunnel_type {
        MAX_GFT_PROFILE_TUNNEL_TYPE
 };
 
+/* Used in gft_profile_key: Indication for protocol type */
 enum gft_profile_upper_protocol_type {
        GFT_PROFILE_ROCE_PROTOCOL = 0,
        GFT_PROFILE_RROCE_PROTOCOL = 1,
@@ -6060,6 +6586,7 @@ enum gft_profile_upper_protocol_type {
        MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
 };
 
+/* GFT RAM line struct */
 struct gft_ram_line {
        __le32 lo;
 #define GFT_RAM_LINE_VLAN_SELECT_MASK                  0x3
@@ -6149,6 +6676,7 @@ struct gft_ram_line {
 #define GFT_RAM_LINE_RESERVED1_SHIFT                   10
 };
 
+/* Used in the first 2 bits for gft_ram_line: Indication for vlan mask */
 enum gft_vlan_select {
        INNER_PROVIDER_VLAN = 0,
        INNER_VLAN = 1,
@@ -6157,10 +6685,205 @@ enum gft_vlan_select {
        MAX_GFT_VLAN_SELECT
 };
 
+/* The rdma task context of Ystorm */
+struct ystorm_rdma_task_st_ctx {
+       struct regpair temp[4];
+};
+
+struct e4_ystorm_rdma_task_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       __le16 msem_ctx_upd_seq;
+       u8 flags0;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK                0xF
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT       0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK           0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT          4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK                   0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT                  5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK                  0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT                 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK                   0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT                  7
+       u8 flags1;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK            0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT           0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK            0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT           2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK     0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT    4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK          0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT         6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK          0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT         7
+       u8 flags2;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK           0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT          0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK                0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT       1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK                0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT       2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK                0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT       3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK                0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT       4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK                0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT       5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK                0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT       6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK                0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT       7
+       u8 key;
+       __le32 mw_cnt;
+       u8 ref_cnt_seq;
+       u8 ctx_upd_seq;
+       __le16 dif_flags;
+       __le16 tx_ref_count;
+       __le16 last_used_ltid;
+       __le16 parent_mr_lo;
+       __le16 parent_mr_hi;
+       __le32 fbo_lo;
+       __le32 fbo_hi;
+};
+
+struct e4_mstorm_rdma_task_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       __le16 icid;
+       u8 flags0;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK                0xF
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT       0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK           0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT          4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK                   0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT                  5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK                   0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT                  6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK                   0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT                  7
+       u8 flags1;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK    0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT   0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK    0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT   2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK    0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT   4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK  0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK  0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+       u8 flags2;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK          0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT         0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK                0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT       1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK                0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT       2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK                0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT       3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK                0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT       4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK                0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT       5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK                0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT       6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK                0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT       7
+       u8 key;
+       __le32 mw_cnt;
+       u8 ref_cnt_seq;
+       u8 ctx_upd_seq;
+       __le16 dif_flags;
+       __le16 tx_ref_count;
+       __le16 last_used_ltid;
+       __le16 parent_mr_lo;
+       __le16 parent_mr_hi;
+       __le32 fbo_lo;
+       __le32 fbo_hi;
+};
+
+/* The roce task context of Mstorm */
 struct mstorm_rdma_task_st_ctx {
        struct regpair temp[4];
 };
 
+/* The roce task context of Ustorm */
+struct ustorm_rdma_task_st_ctx {
+       struct regpair temp[2];
+};
+
+struct e4_ustorm_rdma_task_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       __le16 icid;
+       u8 flags0;
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK                0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT       0
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK           0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT          4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK         0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT                5
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK    0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT   6
+       u8 flags1;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK  0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK          0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT         2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK                    0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT                   4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK           0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT          6
+       u8 flags2;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT        0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK              0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT             1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK              0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT             2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK                  0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT                 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK                0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT       4
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK                        0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT               5
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK                        0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT               6
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK                        0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT               7
+       u8 flags3;
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK                0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT       0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK                0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT       1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK                0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT       2
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK                0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT       3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT        4
+       __le32 dif_err_intervals;
+       __le32 dif_error_1st_interval;
+       __le32 reg2;
+       __le32 dif_runt_value;
+       __le32 reg4;
+       __le32 reg5;
+};
+
+/* RDMA task context */
+struct e4_rdma_task_context {
+       struct ystorm_rdma_task_st_ctx ystorm_st_context;
+       struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context;
+       struct tdif_task_context tdif_context;
+       struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
+       struct mstorm_rdma_task_st_ctx mstorm_st_context;
+       struct rdif_task_context rdif_context;
+       struct ustorm_rdma_task_st_ctx ustorm_st_context;
+       struct regpair ustorm_st_padding[2];
+       struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
+/* rdma function close ramrod data */
 struct rdma_close_func_ramrod_data {
        u8 cnq_start_offset;
        u8 num_cnqs;
@@ -6169,6 +6892,7 @@ struct rdma_close_func_ramrod_data {
        u8 reserved[4];
 };
 
+/* rdma function init CNQ parameters */
 struct rdma_cnq_params {
        __le16 sb_num;
        u8 sb_index;
@@ -6179,6 +6903,7 @@ struct rdma_cnq_params {
        u8 reserved1[6];
 };
 
+/* rdma create cq ramrod data */
 struct rdma_create_cq_ramrod_data {
        struct regpair cq_handle;
        struct regpair pbl_addr;
@@ -6193,21 +6918,25 @@ struct rdma_create_cq_ramrod_data {
        __le16 reserved1;
 };
 
+/* rdma deregister tid ramrod data */
 struct rdma_deregister_tid_ramrod_data {
        __le32 itid;
        __le32 reserved;
 };
 
+/* rdma destroy cq output params */
 struct rdma_destroy_cq_output_params {
        __le16 cnq_num;
        __le16 reserved0;
        __le32 reserved1;
 };
 
+/* rdma destroy cq ramrod data */
 struct rdma_destroy_cq_ramrod_data {
        struct regpair output_params_addr;
 };
 
+/* RDMA slow path EQ cmd IDs */
 enum rdma_event_opcode {
        RDMA_EVENT_UNUSED,
        RDMA_EVENT_FUNC_INIT,
@@ -6223,6 +6952,7 @@ enum rdma_event_opcode {
        MAX_RDMA_EVENT_OPCODE
 };
 
+/* RDMA FW return code for slow path ramrods */
 enum rdma_fw_return_code {
        RDMA_RETURN_OK = 0,
        RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
@@ -6232,20 +6962,24 @@ enum rdma_fw_return_code {
        MAX_RDMA_FW_RETURN_CODE
 };
 
+/* rdma function init header */
 struct rdma_init_func_hdr {
        u8 cnq_start_offset;
        u8 num_cnqs;
        u8 cq_ring_mode;
        u8 vf_id;
        u8 vf_valid;
-       u8 reserved[3];
+       u8 relaxed_ordering;
+       u8 reserved[2];
 };
 
+/* rdma function init ramrod data */
 struct rdma_init_func_ramrod_data {
        struct rdma_init_func_hdr params_header;
        struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
 };
 
+/* RDMA ramrod command IDs */
 enum rdma_ramrod_cmd_id {
        RDMA_RAMROD_UNUSED,
        RDMA_RAMROD_FUNC_INIT,
@@ -6261,42 +6995,43 @@ enum rdma_ramrod_cmd_id {
        MAX_RDMA_RAMROD_CMD_ID
 };
 
+/* rdma register tid ramrod data */
 struct rdma_register_tid_ramrod_data {
        __le16 flags;
 #define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK       0x1F
 #define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT      0
 #define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK       0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT      5
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK  0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK      0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT     7
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT        8
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK        0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK          0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT         6
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK              0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT             7
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK         0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT                8
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK                0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT       9
 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK       0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT      10
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT        11
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK  0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK         0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT                11
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK          0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT         12
 #define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK      0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT     13
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK    0x3
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT   14
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK            0x3
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT           14
        u8 flags1;
 #define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK   0x1F
-#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK    0x7
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT   5
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT  0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK            0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT           5
        u8 flags2;
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK      0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT     0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK              0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT             0
 #define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK     0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT    1
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK   0x3F
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT  2
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK           0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT          2
        u8 key;
        u8 length_hi;
        u8 vf_id;
@@ -6313,19 +7048,21 @@ struct rdma_register_tid_ramrod_data {
        __le32 reserved4[2];
 };
 
+/* rdma resize cq output params */
 struct rdma_resize_cq_output_params {
        __le32 old_cq_cons;
        __le32 old_cq_prod;
 };
 
+/* rdma resize cq ramrod data */
 struct rdma_resize_cq_ramrod_data {
        u8 flags;
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK        0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT       0
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK  0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK          0x3F
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT         2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK             0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT            0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK       0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT      1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK               0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT              2
        u8 pbl_log_page_size;
        __le16 pbl_num_pages;
        __le32 max_cqes;
@@ -6333,10 +7070,12 @@ struct rdma_resize_cq_ramrod_data {
        struct regpair output_params_addr;
 };
 
+/* The rdma storm context of Mstorm */
 struct rdma_srq_context {
        struct regpair temp[8];
 };
 
+/* rdma create srq ramrod data */
 struct rdma_srq_create_ramrod_data {
        struct regpair pbl_base_addr;
        __le16 pages_in_srq_pbl;
@@ -6348,206 +7087,19 @@ struct rdma_srq_create_ramrod_data {
        struct regpair producers_addr;
 };
 
+/* rdma destroy srq ramrod data */
 struct rdma_srq_destroy_ramrod_data {
        struct rdma_srq_id srq_id;
        __le32 reserved;
 };
 
+/* rdma modify srq ramrod data */
 struct rdma_srq_modify_ramrod_data {
        struct rdma_srq_id srq_id;
        __le32 wqe_limit;
 };
 
-struct ystorm_rdma_task_st_ctx {
-       struct regpair temp[4];
-};
-
-struct ystorm_rdma_task_ag_ctx {
-       u8 reserved;
-       u8 byte1;
-       __le16 msem_ctx_upd_seq;
-       u8 flags0;
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK             0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT            5
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK            0x1
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT           6
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK             0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT            7
-       u8 flags1;
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK              0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT             0
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK              0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT             2
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK       0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT      4
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK            0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT           6
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK            0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT           7
-       u8 flags2;
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK             0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT            0
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT         1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT         2
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT         3
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT         4
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT         5
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT         6
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT         7
-       u8 key;
-       __le32 mw_cnt;
-       u8 ref_cnt_seq;
-       u8 ctx_upd_seq;
-       __le16 dif_flags;
-       __le16 tx_ref_count;
-       __le16 last_used_ltid;
-       __le16 parent_mr_lo;
-       __le16 parent_mr_hi;
-       __le32 fbo_lo;
-       __le32 fbo_hi;
-};
-
-struct mstorm_rdma_task_ag_ctx {
-       u8 reserved;
-       u8 byte1;
-       __le16 icid;
-       u8 flags0;
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK             0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT            5
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK             0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT            6
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK             0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT            7
-       u8 flags1;
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK              0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT             0
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK              0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT             2
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK              0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT             4
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK            0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT           6
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK            0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT           7
-       u8 flags2;
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK            0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT           0
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT         1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT         2
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT         3
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT         4
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT         5
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT         6
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT         7
-       u8 key;
-       __le32 mw_cnt;
-       u8 ref_cnt_seq;
-       u8 ctx_upd_seq;
-       __le16 dif_flags;
-       __le16 tx_ref_count;
-       __le16 last_used_ltid;
-       __le16 parent_mr_lo;
-       __le16 parent_mr_hi;
-       __le32 fbo_lo;
-       __le32 fbo_hi;
-};
-
-struct ustorm_rdma_task_st_ctx {
-       struct regpair temp[2];
-};
-
-struct ustorm_rdma_task_ag_ctx {
-       u8 reserved;
-       u8 byte1;
-       __le16 icid;
-       u8 flags0;
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK         0xF
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT        0
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK            0x1
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT           4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK          0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT         5
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK     0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT    6
-       u8 flags1;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK   0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT  0
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK           0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT          2
-#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK                     0x3
-#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT                    4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK            0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT           6
-       u8 flags2;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK  0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK               0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT              1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK               0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT              2
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK                   0x1
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT                  3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK         0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT        4
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT                5
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT                6
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT                7
-       u8 flags3;
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT                0
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT                1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT                2
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT                3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK          0xF
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT         4
-       __le32 dif_err_intervals;
-       __le32 dif_error_1st_interval;
-       __le32 reg2;
-       __le32 dif_runt_value;
-       __le32 reg4;
-       __le32 reg5;
-};
-
-struct rdma_task_context {
-       struct ystorm_rdma_task_st_ctx ystorm_st_context;
-       struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
-       struct tdif_task_context tdif_context;
-       struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
-       struct mstorm_rdma_task_st_ctx mstorm_st_context;
-       struct rdif_task_context rdif_context;
-       struct ustorm_rdma_task_st_ctx ustorm_st_context;
-       struct regpair ustorm_st_padding[2];
-       struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
-};
-
+/* RDMA Tid type enumeration (for register_tid ramrod) */
 enum rdma_tid_type {
        RDMA_TID_REGISTERED_MR,
        RDMA_TID_FMR,
@@ -6556,214 +7108,214 @@ enum rdma_tid_type {
        MAX_RDMA_TID_TYPE
 };
 
-struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
        u8 reserved0;
        u8 state;
        u8 flags0;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT     0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT             1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT     3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT             5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT             6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT             7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT    0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK             0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT            1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK             0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT            2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT    3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK             0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT            4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK             0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT            5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK             0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT            6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK             0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT            7
        u8 flags1;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT             1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT            2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT            3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT            4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT     5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT            6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT     7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK             0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT            0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK             0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT            1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT           2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT           3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT           4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT    5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT           6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT    7
        u8 flags2;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT              0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT              2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT              4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT              6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK      0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT     0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK      0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT     2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK      0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT     4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK      0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT     6
        u8 flags3;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT              0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT              2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT              4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK       0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT      6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK              0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT             0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK              0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT             2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK              0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT             4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK      0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT     6
        u8 flags4;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT              0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT              2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK      0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT     0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK      0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT     2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK     0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT    4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK     0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT    6
        u8 flags5;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK     0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT    0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK     0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT    2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK     0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT    4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK     0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT    6
        u8 flags6;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK     0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT    0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK     0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT    2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK     0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT    4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK     0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT    6
        u8 flags7;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK         0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT        4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT            6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT            7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK             0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT            0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK             0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT            2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK                0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT       4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT           6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT           7
        u8 flags8;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT            0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT            1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT            2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT            3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT            4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK    0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT   5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT            6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT            7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT           0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT           1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT           2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT           3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT           4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK   0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT  5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT           6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT           7
        u8 flags9;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT           0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT           1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT           2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT           3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT           4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT           5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT           6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT           7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK   0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT  0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK   0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT  1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK   0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT  2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK   0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT  3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK   0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT  4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK   0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT  5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK   0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT  6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK   0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT  7
        u8 flags10;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT           0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT           1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT           2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT           3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT     4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT           5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT          6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT          7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK           0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT          0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK           0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT          1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK           0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT          2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK           0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT          3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT    4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK           0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT          5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK          0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT         6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK          0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT         7
        u8 flags11;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT          0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT          1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT          2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT          3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT          4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT          5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT     6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT          7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK          0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT         0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK          0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT         1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK          0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT         2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK          0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT         3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK          0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT         4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK          0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT         5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT    6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK          0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT         7
        u8 flags12;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT         0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT         1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT     2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT     3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT         4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT         5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT         6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT         7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK         0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT                0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK         0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT                1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT    2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT    3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK         0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT                4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK         0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT                5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK         0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT                6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK         0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT                7
        u8 flags13;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT         0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT         1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT     2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT     3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT     4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT     5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT     6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT     7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK         0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT                0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK         0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT                1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT    2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT    3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT    4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT    5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT    6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK     0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT    7
        u8 flags14;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK         0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT        0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT            1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK      0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT     2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT         4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK  0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK                0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT       0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK            0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT           1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK     0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT    2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK         0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT                4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT        5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK             0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT            6
        u8 byte2;
        __le16 physical_q0;
        __le16 word1;
@@ -6783,126 +7335,126 @@ struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
        __le32 reg4;
 };
 
-struct mstorm_rdma_conn_ag_ctx {
+struct e4_mstorm_rdma_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK   0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT  0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK   0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT  1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT   2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT   4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT   6
        u8 flags1;
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK          0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT         0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK          0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT         1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK          0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT         2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK                0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT       3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT       4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT       5
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT       6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT       7
        __le16 word0;
        __le16 word1;
        __le32 reg0;
        __le32 reg1;
 };
 
-struct tstorm_rdma_conn_ag_ctx {
+struct e4_tstorm_rdma_conn_ag_ctx {
        u8 reserved0;
        u8 byte1;
        u8 flags0;
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK          0x1
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT         0
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT                 1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT                 2
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT                 3
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT                 4
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT                 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT                  6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK   0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT  0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK           0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT          1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK           0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT          2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK           0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT          3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK           0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT          4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK           0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT          5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_MASK            0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT           6
        u8 flags1;
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT                  0
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT                  2
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK     0x3
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT    4
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK           0x3
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT          6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_MASK                    0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT                   0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_MASK                    0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT                   2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK      0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT     4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK            0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT           6
        u8 flags2;
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK       0x3
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT      0
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT                  2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT                  4
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT                  6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK                0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT       0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_MASK                    0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT                   2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_MASK                    0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT                   4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_MASK                    0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT                   6
        u8 flags3;
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT                  0
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK                  0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT                 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT                4
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT                5
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT                6
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_MASK                    0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT                   0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_MASK                   0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT                  2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK                  0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT                 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK                  0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT                 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK                  0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT                 6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK   0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT  7
        u8 flags4;
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK        0x1
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT       0
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK    0x1
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT   1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT                2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT                3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT                4
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT                5
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK                0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT               6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT              7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK         0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT                0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK     0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT    1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK                  0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT                 2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK                  0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT                 3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK                  0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT                 4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK                  0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT                 5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK                 0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT                        6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK                        0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT               7
        u8 flags5;
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT              0
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT              1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT              2
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT              3
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT              4
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT              5
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT              6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT              7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT       0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT       1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT       2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT       3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK                0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT       4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK                0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT       5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK                0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT       6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK                0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT       7
        __le32 reg0;
        __le32 reg1;
        __le32 reg2;
@@ -6924,73 +7476,73 @@ struct tstorm_rdma_conn_ag_ctx {
        __le32 reg10;
 };
 
-struct tstorm_rdma_task_ag_ctx {
+struct e4_tstorm_rdma_task_ag_ctx {
        u8 byte0;
        u8 byte1;
        __le16 word0;
        u8 flags0;
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK  0xF
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT    4
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT    5
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT    6
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT    7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK                0xF
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT       0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK           0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT          4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK           0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT          5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK           0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT          6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK           0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT          7
        u8 flags1;
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT    0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT    1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT     2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT     4
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT     6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK   0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT  0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK   0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT  1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK    0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT   2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK    0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT   4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK    0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT   6
        u8 flags2;
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT     0
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT     2
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT     4
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT     6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK    0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT   0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK    0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT   2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK    0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT   4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK    0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT   6
        u8 flags3;
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT     0
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT   2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT   3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT   4
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT   5
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT   6
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT   7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK    0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT   0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK  0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK  0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK  0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK  0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK  0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK  0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
        u8 flags4;
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT   0
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT   1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK          0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT         0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK          0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT         1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK                0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT       2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK                0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT       3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK                0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT       4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK                0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT       5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK                0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT       6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK                0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT       7
        u8 byte2;
        __le16 word1;
        __le32 reg0;
@@ -7003,63 +7555,63 @@ struct tstorm_rdma_task_ag_ctx {
        __le32 reg2;
 };
 
-struct ustorm_rdma_conn_ag_ctx {
+struct e4_ustorm_rdma_conn_ag_ctx {
        u8 reserved;
        u8 byte1;
        u8 flags0;
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT    0
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK             0x1
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT            1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK      0x3
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT     2
-#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT             4
-#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT             6
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK   0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT  0
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_MASK           0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT          1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK    0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT   2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK            0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT           4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK            0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT           6
        u8 flags1;
-#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT             0
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK     0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT    2
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK        0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT       4
-#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT             6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK            0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT           0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK   0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT  2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK      0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT     4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK            0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT           6
        u8 flags2;
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK   0x1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT  0
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT           1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT           2
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT           3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK  0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK     0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT    5
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT           6
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK         0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT        7
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK         0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT                0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK                  0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT                 1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK                  0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT                 2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK                  0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT                 3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK                0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT       4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK           0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT          5
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK                  0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT                 6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK               0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT              7
        u8 flags3;
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT           0
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT         1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT         2
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT         3
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT         4
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT         5
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT         6
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT         7
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK          0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT         0
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT       1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT       2
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT       3
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK                0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT       4
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK                0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT       5
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK                0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT       6
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK                0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT       7
        u8 byte2;
        u8 byte3;
        __le16 conn_dpi;
@@ -7072,214 +7624,214 @@ struct ustorm_rdma_conn_ag_ctx {
        __le16 word3;
 };
 
-struct xstorm_rdma_conn_ag_ctx {
+struct e4_xstorm_rdma_conn_ag_ctx {
        u8 reserved0;
        u8 state;
        u8 flags0;
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT     0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT             1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT     3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT             5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT             6
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT             7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT  0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK           0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT          1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK           0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT          2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT  3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK           0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT          4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK           0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT          5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK           0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT          6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK           0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT          7
        u8 flags1;
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT             1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT            2
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT            3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT            4
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT     5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT            6
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT     7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK           0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT          0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK           0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT          1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT         2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT         3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT         4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT  5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT         6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT  7
        u8 flags2;
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT              0
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT              2
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT              4
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT              6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT   0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT   2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT   4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_MASK    0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT   6
        u8 flags3;
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT              0
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT              2
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT              4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK       0x3
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT      6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_MASK            0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT           0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_MASK            0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT           2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_MASK            0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT           4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK    0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT   6
        u8 flags4;
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT              0
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT              2
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_MASK    0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT   0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_MASK    0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT   2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_MASK   0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT  4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_MASK   0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT  6
        u8 flags5;
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_MASK   0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT  0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_MASK   0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT  2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_MASK   0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT  4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_MASK   0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT  6
        u8 flags6;
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_MASK   0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT  0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_MASK   0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT  2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_MASK   0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT  4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_MASK   0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT  6
        u8 flags7;
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK         0x3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT        4
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT            6
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT            7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_MASK           0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT          0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_MASK           0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT          2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK      0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT     4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT         6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT         7
        u8 flags8;
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT            0
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT            1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT            2
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT            3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT            4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK    0x1
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT   5
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT            6
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT            7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT         0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT         1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT         2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT         3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT         4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT        5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT         6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK          0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT         7
        u8 flags9;
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT           0
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT           1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT           2
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT           3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT           4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT           5
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT           6
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT           7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT        0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT        1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT        2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT        3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT        4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT        5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT        6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT        7
        u8 flags10;
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT           0
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT           1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT           2
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT           3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT     4
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT           5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT          6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT          7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK         0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT                0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK         0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT                1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK         0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT                2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK         0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT                3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT  4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK         0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT                5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK                0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT       6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT       7
        u8 flags11;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT          0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT          1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT          2
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT          3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT          4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT          5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT     6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT          7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT       0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT       1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT       2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK                0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT       3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK                0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT       4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK                0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT       5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT  6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK                0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT       7
        u8 flags12;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT         0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT         1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT     2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT     3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT         4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT         5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT         6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT         7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK       0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT      0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK       0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT      1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT  2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT  3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK       0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT      4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK       0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT      5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK       0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT      6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK       0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT      7
        u8 flags13;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT         0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT         1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT     2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT     3
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT     4
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT     5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT     6
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT     7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK       0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT      0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK       0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT      1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT  2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT  3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT  4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT  5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT  6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK   0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT  7
        u8 flags14;
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK         0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT        0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT            1
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK      0x3
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT     2
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT         4
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK  0x1
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK              0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT             0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK                  0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT                 1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK           0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT          2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK               0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT              4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK       0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT      5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_MASK                   0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT                  6
        u8 byte2;
        __le16 physical_q0;
        __le16 word1;
@@ -7301,37 +7853,37 @@ struct xstorm_rdma_conn_ag_ctx {
        __le32 reg6;
 };
 
-struct ystorm_rdma_conn_ag_ctx {
+struct e4_ystorm_rdma_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK   0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT  0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK   0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT  1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT   2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT   4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT   6
        u8 flags1;
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK          0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT         0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK          0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT         1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK          0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT         2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK                0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT       3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT       4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT       5
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT       6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT       7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -7345,62 +7897,70 @@ struct ystorm_rdma_conn_ag_ctx {
        __le32 reg3;
 };
 
-struct mstorm_roce_conn_st_ctx {
-       struct regpair temp[6];
+/* The roce storm context of Ystorm */
+struct ystorm_roce_conn_st_ctx {
+       struct regpair temp[2];
 };
 
+/* The roce storm context of Mstorm */
 struct pstorm_roce_conn_st_ctx {
        struct regpair temp[16];
 };
 
-struct ystorm_roce_conn_st_ctx {
-       struct regpair temp[2];
-};
-
+/* The roce storm context of Xstorm */
 struct xstorm_roce_conn_st_ctx {
        struct regpair temp[24];
 };
 
+/* The roce storm context of Tstorm */
 struct tstorm_roce_conn_st_ctx {
        struct regpair temp[30];
 };
 
+/* The roce storm context of Mstorm */
+struct mstorm_roce_conn_st_ctx {
+       struct regpair temp[6];
+};
+
+/* The roce storm context of Ystorm */
 struct ustorm_roce_conn_st_ctx {
        struct regpair temp[12];
 };
 
-struct roce_conn_context {
+/* roce connection context */
+struct e4_roce_conn_context {
        struct ystorm_roce_conn_st_ctx ystorm_st_context;
        struct regpair ystorm_st_padding[2];
        struct pstorm_roce_conn_st_ctx pstorm_st_context;
        struct xstorm_roce_conn_st_ctx xstorm_st_context;
        struct regpair xstorm_st_padding[2];
-       struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
-       struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+       struct e4_xstorm_rdma_conn_ag_ctx xstorm_ag_context;
+       struct e4_tstorm_rdma_conn_ag_ctx tstorm_ag_context;
        struct timers_context timer_context;
-       struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+       struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
        struct tstorm_roce_conn_st_ctx tstorm_st_context;
        struct mstorm_roce_conn_st_ctx mstorm_st_context;
        struct ustorm_roce_conn_st_ctx ustorm_st_context;
        struct regpair ustorm_st_padding[2];
 };
 
+/* roce create qp requester ramrod data */
 struct roce_create_qp_req_ramrod_data {
        __le16 flags;
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK          0x3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT         0
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK  0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK        0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT       3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK                  0x7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT                 4
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK             0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT            7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK        0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT       8
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK          0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT         12
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK                        0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT               0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK                0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT       2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK              0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT             3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK                                0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT                       4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK                   0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT                  7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK              0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT             8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK                        0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT               12
        u8 max_ord;
        u8 traffic_class;
        u8 hop_limit;
@@ -7431,26 +7991,27 @@ struct roce_create_qp_req_ramrod_data {
        __le16 dpi;
 };
 
+/* roce create qp responder ramrod data */
 struct roce_create_qp_resp_ramrod_data {
        __le16 flags;
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK          0x3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT         0
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK           0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT          2
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK           0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT          3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK            0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT           4
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK              0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT             5
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK  0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK   0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT  7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK                  0x7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT                 8
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK    0x1F
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT   11
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK               0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT              0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK                        0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT               2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK                        0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT               3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK                 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT                        4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK                   0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT                  5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK       0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT      6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK           0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT          7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK                       0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT                      8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK         0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT                11
        u8 max_ird;
        u8 traffic_class;
        u8 hop_limit;
@@ -7482,24 +8043,40 @@ struct roce_create_qp_resp_ramrod_data {
        __le16 dpi;
 };
 
+/* roce DCQCN received statistics */
+struct roce_dcqcn_received_stats {
+       struct regpair ecn_pkt_rcv;
+       struct regpair cnp_pkt_rcv;
+};
+
+/* roce DCQCN sent statistics */
+struct roce_dcqcn_sent_stats {
+       struct regpair cnp_pkt_sent;
+};
+
+/* RoCE destroy qp requester output params */
 struct roce_destroy_qp_req_output_params {
        __le32 num_bound_mw;
        __le32 cq_prod;
 };
 
+/* RoCE destroy qp requester ramrod data */
 struct roce_destroy_qp_req_ramrod_data {
        struct regpair output_params_addr;
 };
 
+/* RoCE destroy qp responder output params */
 struct roce_destroy_qp_resp_output_params {
        __le32 num_invalidated_mw;
        __le32 cq_prod;
 };
 
+/* RoCE destroy qp responder ramrod data */
 struct roce_destroy_qp_resp_ramrod_data {
        struct regpair output_params_addr;
 };
 
+/* roce special events statistics */
 struct roce_events_stats {
        __le16 silent_drops;
        __le16 rnr_naks_sent;
@@ -7508,6 +8085,7 @@ struct roce_events_stats {
        __le32 reserved;
 };
 
+/* ROCE slow path EQ cmd IDs */
 enum roce_event_opcode {
        ROCE_EVENT_CREATE_QP = 11,
        ROCE_EVENT_MODIFY_QP,
@@ -7518,6 +8096,7 @@ enum roce_event_opcode {
        MAX_ROCE_EVENT_OPCODE
 };
 
+/* roce func init ramrod data */
 struct roce_init_func_params {
        u8 ll2_queue_id;
        u8 cnp_vlan_priority;
@@ -7526,42 +8105,46 @@ struct roce_init_func_params {
        __le32 cnp_send_timeout;
 };
 
+/* roce func init ramrod data */
 struct roce_init_func_ramrod_data {
        struct rdma_init_func_ramrod_data rdma;
        struct roce_init_func_params roce;
 };
 
+/* roce modify qp requester ramrod data */
 struct roce_modify_qp_req_ramrod_data {
        __le16 flags;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT     0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT     1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK  0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK            0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT           3
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK   0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT  4
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK          0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT         5
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT     6
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK    0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT   7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT     8
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK              0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT             9
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK                  0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT                 10
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK            0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT           13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK            0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT           0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK            0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT           1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK                0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT       2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK                  0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT                 3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK         0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT                4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK                        0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT               5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK            0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT           6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK          0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT         7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK            0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT           8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK                    0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT                   9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK                                0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT                       10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK                0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT       13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK                  0x3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT                 14
        u8 fields;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK        0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT       0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK          0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT         4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK      0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT     0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK                0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT       4
        u8 max_ord;
        u8 traffic_class;
        u8 hop_limit;
@@ -7570,66 +8153,76 @@ struct roce_modify_qp_req_ramrod_data {
        __le32 ack_timeout_val;
        __le16 mtu;
        __le16 reserved2;
-       __le32 reserved3[3];
+       __le32 reserved3[2];
+       __le16 low_latency_phy_queue;
+       __le16 regular_latency_phy_queue;
        __le32 src_gid[4];
        __le32 dst_gid[4];
 };
 
+/* roce modify qp responder ramrod data */
 struct roce_modify_qp_resp_ramrod_data {
        __le16 flags;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK        0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT       0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK             0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT            1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK             0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT            2
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK              0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT             3
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK              0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT             4
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK     0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT    5
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK            0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT           6
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK                0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT               7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK  0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK        0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT       9
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK              0x3F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT             10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK           0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT          0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK                        0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT               1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK                        0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT               2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK                 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT                        3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK                 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT                        4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK                0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT       5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK               0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT              6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK                   0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT                  7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK     0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT    8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK           0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT          9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK       0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT      10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK                 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT                        11
        u8 fields;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK                    0x7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT                   0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK      0x1F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT     3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK               0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT              0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT        3
        u8 max_ird;
        u8 traffic_class;
        u8 hop_limit;
        __le16 p_key;
        __le32 flow_label;
        __le16 mtu;
-       __le16 reserved2;
+       __le16 low_latency_phy_queue;
+       __le16 regular_latency_phy_queue;
+       u8 reserved2[6];
        __le32 src_gid[4];
        __le32 dst_gid[4];
 };
 
+/* RoCE query qp requester output params */
 struct roce_query_qp_req_output_params {
        __le32 psn;
        __le32 flags;
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK          0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT         0
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK  0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK        0x3FFFFFFF
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT       2
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK           0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT          0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK   0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT  1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK         0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT                2
 };
 
+/* RoCE query qp requester ramrod data */
 struct roce_query_qp_req_ramrod_data {
        struct regpair output_params_addr;
 };
 
+/* RoCE query qp responder output params */
 struct roce_query_qp_resp_output_params {
        __le32 psn;
        __le32 err_flag;
@@ -7639,10 +8232,12 @@ struct roce_query_qp_resp_output_params {
 #define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
 };
 
+/* RoCE query qp responder ramrod data */
 struct roce_query_qp_resp_ramrod_data {
        struct regpair output_params_addr;
 };
 
+/* ROCE ramrod command IDs */
 enum roce_ramrod_cmd_id {
        ROCE_RAMROD_CREATE_QP = 11,
        ROCE_RAMROD_MODIFY_QP,
@@ -7653,163 +8248,163 @@ enum roce_ramrod_cmd_id {
        MAX_ROCE_RAMROD_CMD_ID
 };
 
-struct mstorm_roce_req_conn_ag_ctx {
+struct e4_mstorm_roce_req_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK       0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT      0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK       0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT      1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK                0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT       2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT       4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK                0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT       6
        u8 flags1;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK      0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT     0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK      0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT     1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK      0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT     2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK    0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT   3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK    0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT   4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK    0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT   5
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK    0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT   6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK    0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT   7
        __le16 word0;
        __le16 word1;
        __le32 reg0;
        __le32 reg1;
 };
 
-struct mstorm_roce_resp_conn_ag_ctx {
+struct e4_mstorm_roce_resp_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK      0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT     0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK      0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT     1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK       0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT      2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK       0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT      4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK       0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT      6
        u8 flags1;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK     0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT    0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK     0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT    1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK     0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT    2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK   0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT  3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK   0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT  4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK   0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT  5
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK   0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT  6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK   0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT  7
        __le16 word0;
        __le16 word1;
        __le32 reg0;
        __le32 reg1;
 };
 
-struct tstorm_roce_req_conn_ag_ctx {
+struct e4_tstorm_roce_req_conn_ag_ctx {
        u8 reserved0;
        u8 state;
        u8 flags0;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK                0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT               0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK            0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT           1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT       2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK                        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT                       3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK                0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT               4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK                  0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT                 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK                    0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT                   6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK               0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT              0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK          0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT         1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK      0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT     2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK                       0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT                      3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK               0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT              4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK                 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT                        5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK                   0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT                  6
        u8 flags1;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                         0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT                        0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK                 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT                2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK           0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT          4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK                 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT                6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                                0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT                       0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK                        0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT               2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK          0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT         4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK                        0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT               6
        u8 flags2;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK             0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT            0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK                0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT               2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK           0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT          4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK               0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT              6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK    0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT   0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK       0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT      2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK  0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK      0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT     6
        u8 flags3;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK     0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT    0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK       0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT      2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK                 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT                4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK                       0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT                      5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK              0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT             6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT       7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK    0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT   0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK      0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT     2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK                        0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT               4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK                      0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT                     5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK             0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT            6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK       0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT      7
        u8 flags4;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK              0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT             0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK          0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT         1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK             0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT            2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT       3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK            0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT           4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK  0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK    0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT   6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT                    7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK             0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT            0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK         0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT                1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK            0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT           2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK       0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT      3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK           0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT          4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT        5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK   0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT  6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK                    0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT                   7
        u8 flags5;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT                    0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT                    1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT                    2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT                    3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT                    4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK              0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT             5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT                    6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT                    7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK            0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT           0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK            0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT           1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK            0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT           2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK            0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT           3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK            0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT           4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK     0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT    5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK            0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT           6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK            0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT           7
        __le32 reg0;
        __le32 snd_nxt_psn;
        __le32 snd_max_psn;
@@ -7825,95 +8420,95 @@ struct tstorm_roce_req_conn_ag_ctx {
        u8 byte4;
        u8 byte5;
        __le16 snd_sq_cons;
-       __le16 word2;
+       __le16 conn_dpi;
        __le16 word3;
        __le32 reg9;
        __le32 reg10;
 };
 
-struct tstorm_roce_resp_conn_ag_ctx {
+struct e4_tstorm_roce_resp_conn_ag_ctx {
        u8 byte0;
        u8 state;
        u8 flags0;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT       0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK  0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK                0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT               2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK                0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT               3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK        0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT       4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK                0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT               5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT                6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK              0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT             0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT        1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK                      0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT                     2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK                      0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT                     3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK              0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT             4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK                      0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT                     5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK                       0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT                      6
        u8 flags1;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK         0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT        0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK         0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT        2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT                4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK         0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT        6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK       0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT      0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK       0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT      2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK               0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT              4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK       0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT      6
        u8 flags2;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK     0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT    0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT                2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT                4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT                6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK   0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT  0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK               0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT              2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK               0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT              4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK               0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT              6
        u8 flags3;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT                0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK                0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT               2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT              4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK      0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT     5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK      0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT     6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT              7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK               0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT              0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK              0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT             2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK             0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT            4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK    0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT   5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK    0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT   6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK             0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT            7
        u8 flags4;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK      0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT     0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK  0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT              2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT              3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT              4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT              5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK              0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT             6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT            7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK            0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT           0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK                0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT       1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK                     0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT                    2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK                     0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT                    3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK                     0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT                    4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK                     0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT                    5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK                    0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT                   6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK                   0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT                  7
        u8 flags5;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT            0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT            1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT            2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT            3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT            4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK          0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT         5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT            6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT            7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK           0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT          0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK           0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT          1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK           0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT          2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK           0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT          3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK           0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT          4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK                0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT       5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK           0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT          6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK           0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT          7
        __le32 psn_and_rxmit_id_echo;
        __le32 reg1;
        __le32 reg2;
@@ -7935,63 +8530,63 @@ struct tstorm_roce_resp_conn_ag_ctx {
        __le32 reg10;
 };
 
-struct ustorm_roce_req_conn_ag_ctx {
+struct e4_ustorm_roce_req_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK       0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT      0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK       0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT      1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK                0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT       2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT       4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK                0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT       6
        u8 flags1;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK                0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT       0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK                0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT       2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK                0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT       4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK                0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT       6
        u8 flags2;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK      0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT     0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK      0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT     1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK      0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT     2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK      0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT     3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK      0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT     4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK      0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT     5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK      0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT     6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK    0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT   7
        u8 flags3;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK    0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT   0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK    0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT   1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK    0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT   2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK    0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT   3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK    0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT   4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK    0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT   5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK    0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT   6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK    0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT   7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -8004,63 +8599,63 @@ struct ustorm_roce_req_conn_ag_ctx {
        __le16 word3;
 };
 
-struct ustorm_roce_resp_conn_ag_ctx {
+struct e4_ustorm_roce_resp_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK      0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT     0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK      0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT     1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK       0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT      2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK       0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT      4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK       0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT      6
        u8 flags1;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK       0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT      0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK       0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT      2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK       0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT      4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK       0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT      6
        u8 flags2;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK     0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT    0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK     0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT    1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK     0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT    2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK     0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT    3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK     0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT    4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK     0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT    5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK     0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT    6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK   0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT  7
        u8 flags3;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK   0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT  0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK   0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT  1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK   0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT  2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK   0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT  3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK   0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT  4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK   0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT  5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK   0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT  6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK   0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT  7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -8073,214 +8668,214 @@ struct ustorm_roce_resp_conn_ag_ctx {
        __le16 word3;
 };
 
-struct xstorm_roce_req_conn_ag_ctx {
+struct e4_xstorm_roce_req_conn_ag_ctx {
        u8 reserved0;
        u8 state;
        u8 flags0;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT       0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT          1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT          2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT       3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT          4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT          5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT          6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT          7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT      0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK          0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT         1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK          0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT         2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT      3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK          0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT         4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK          0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT         5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK          0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT         6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK          0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT         7
        u8 flags1;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT          0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT          1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT              2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT              3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT              4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT              5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK         0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT        6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT       7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK          0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT         0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK          0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT         1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK              0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT             2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK              0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT             3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK              0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT             4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK              0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT             5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK                0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT       6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT      7
        u8 flags2;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT                0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT                2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT                4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT                6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK                0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT       0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT       2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK                0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT       4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK                0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT       6
        u8 flags3;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK         0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT        0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK         0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT        2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK        0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK         0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT        6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK                0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT       0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK                0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT       2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK       0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT      4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK                0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT       6
        u8 flags4;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT                0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT                2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT               4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK                0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT       0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK                0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT       2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK       0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT      4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK       0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT      6
        u8 flags5;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT               0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT               2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK        0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK               0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT              0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK               0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT              2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK       0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT      4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK               0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT              6
        u8 flags6;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT               0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT               2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT               4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK       0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT      0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK       0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT      2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK       0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT      4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK       0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT      6
        u8 flags7;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT               0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT               2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK           0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT          4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT              6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT              7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK       0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT      0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK       0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT      2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK  0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK      0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT     6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK      0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT     7
        u8 flags8;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT              0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT              1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT     2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT     3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK     0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT    4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT     5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT              6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT              7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK              0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT             0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK              0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT             1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK     0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT    2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK     0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT    3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK    0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT   4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK     0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT    5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK              0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT             6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK              0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT             7
        u8 flags9;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT             0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT             1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT             2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT             3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK     0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT    4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT             5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT             6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT             7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT            0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT            1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT            2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT            3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK    0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT   4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT            5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT            6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT            7
        u8 flags10;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT             0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT             1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT             2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT             3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT             5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT            6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT            7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT            0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT            1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT            2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT            3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT      4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK             0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT            5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK            0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT           6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK            0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT           7
        u8 flags11;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT            0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT            1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT            2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT            3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT            4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK  0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT       6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT            7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK            0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT           0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK            0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT           1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK            0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT           2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK            0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT           3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK            0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT           4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT        5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT      6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK            0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT           7
        u8 flags12;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK          0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT         0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT           1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT       2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT       3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK   0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT  4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT           5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK   0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT  6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK     0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT    7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK         0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT                0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK           0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT          1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT      2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT      3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK  0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK           0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT          5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK  0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK    0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT   7
        u8 flags13;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT           0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT           1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT       2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT       3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT       5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT       6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT       7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK           0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT          0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK           0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT          1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT      2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT      3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT      4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT      5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT      6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK       0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT      7
        u8 flags14;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT     0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT              1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK        0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT       2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT           4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK    0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT   5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK     0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT    0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK              0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT             1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK       0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT      2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK           0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT          4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK   0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT  5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK               0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT              6
        u8 byte2;
        __le16 physical_q0;
        __le16 word1;
@@ -8302,224 +8897,224 @@ struct xstorm_roce_req_conn_ag_ctx {
        __le32 orq_cons;
 };
 
-struct xstorm_roce_resp_conn_ag_ctx {
+struct e4_xstorm_roce_resp_conn_ag_ctx {
        u8 reserved0;
        u8 state;
        u8 flags0;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT     0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT        1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT        2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT     3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT        4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT        5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT        6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT        7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT     0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK         0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT                1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK         0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT                2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT     3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK         0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT                4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK         0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT                5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK         0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT                6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK         0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT                7
        u8 flags1;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT        0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT        1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT            2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT            3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT            4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT            5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK       0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT      6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT     7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK         0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT                0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK         0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT                1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK             0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT            2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK             0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT            3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK             0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT            4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK             0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT            5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK       0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT      6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT     7
        u8 flags2;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT              0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT              2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT              4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT              6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK       0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT      0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK       0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT      2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK       0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT      4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK       0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT      6
        u8 flags3;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK          0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT         0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK       0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT      2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK      0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT     4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK       0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT      6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK          0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT         0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK       0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT      2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT     4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK       0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT      6
        u8 flags4;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT              0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT              2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT             4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK       0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT      0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK       0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT      2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT     4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT     6
        u8 flags5;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT             0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT             2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT             4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT     0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT     2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT     4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT     6
        u8 flags6;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT             0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT             2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT             4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT     0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT     2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT     4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT     6
        u8 flags7;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT             0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT             2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK         0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT        4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT            6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT            7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT     0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT     2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT        4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK     0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT    6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK     0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT    7
        u8 flags8;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT            0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT            1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK       0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT      2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK    0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT   3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK   0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT  4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK    0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT   5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT            6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT            7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK             0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT            0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK             0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT            1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK       0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT      2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK    0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT   3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK   0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT  4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK    0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT   5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK             0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT            6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK             0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT            7
        u8 flags9;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT           0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT           1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT           2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT           3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT           4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT           5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT           6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT           7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK    0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT   0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK    0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT   1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK    0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT   2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK    0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT   3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK    0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT   4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK    0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT   5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK    0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT   6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK    0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT   7
        u8 flags10;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT           0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT           1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT           2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT           3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT     4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT           5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT          6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT          7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK            0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT           0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK            0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT           1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK            0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT           2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK            0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT           3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT     4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK            0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT           5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK           0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT          6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK           0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT          7
        u8 flags11;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT          0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT          1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT          2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT          3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT          4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT          5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT     6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT          7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK           0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT          0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK           0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT          1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK           0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT          2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK           0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT          3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK           0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT          4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK           0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT          5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT     6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK           0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT          7
        u8 flags12;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT         0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK  0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT     2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT     3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT         4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT         5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT         6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT         7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK  0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK          0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT         1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT     2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT     3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK          0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT         4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK          0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT         5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK          0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT         6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK          0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT         7
        u8 flags13;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT         0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT         1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT     2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT     3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT     4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT     5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT     6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT     7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK          0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT         0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK          0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT         1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT     2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT     3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT     4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT     5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT     6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK      0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT     7
        u8 flags14;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT            0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT            1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT            2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT            3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT            4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT            5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK     0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT    0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK     0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT    1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK     0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT    2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK     0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT    3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK     0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT    4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK     0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT    5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK      0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT     6
        u8 byte2;
        __le16 physical_q0;
-       __le16 word1;
-       __le16 irq_prod;
-       __le16 word3;
-       __le16 word4;
-       __le16 ereserved1;
+       __le16 irq_prod_shadow;
+       __le16 word2;
        __le16 irq_cons;
+       __le16 irq_prod;
+       __le16 e5_reserved1;
+       __le16 conn_dpi;
        u8 rxmit_opcode;
        u8 byte4;
        u8 byte5;
@@ -8533,37 +9128,37 @@ struct xstorm_roce_resp_conn_ag_ctx {
        __le32 msn_and_syndrome;
 };
 
-struct ystorm_roce_req_conn_ag_ctx {
+struct e4_ystorm_roce_req_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK       0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT      0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK       0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT      1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK                0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT       2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT       4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK                0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT       6
        u8 flags1;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK      0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT     0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK      0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT     1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK      0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT     2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK    0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT   3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK    0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT   4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK    0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT   5
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK    0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT   6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK    0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT   7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -8577,37 +9172,37 @@ struct ystorm_roce_req_conn_ag_ctx {
        __le32 reg3;
 };
 
-struct ystorm_roce_resp_conn_ag_ctx {
+struct e4_ystorm_roce_resp_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK      0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT     0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK      0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT     1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK       0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT      2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK       0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT      4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK       0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT      6
        u8 flags1;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK     0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT    0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK     0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT    1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK     0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT    2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK   0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT  3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK   0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT  4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK   0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT  5
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK   0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT  6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK   0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT  7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -8621,6 +9216,7 @@ struct ystorm_roce_resp_conn_ag_ctx {
        __le32 reg3;
 };
 
+/* Roce doorbell data */
 enum roce_flavor {
        PLAIN_ROCE,
        RROCE_IPV4,
@@ -8628,228 +9224,231 @@ enum roce_flavor {
        MAX_ROCE_FLAVOR
 };
 
+/* The iwarp storm context of Ystorm */
 struct ystorm_iwarp_conn_st_ctx {
        __le32 reserved[4];
 };
 
+/* The iwarp storm context of Pstorm */
 struct pstorm_iwarp_conn_st_ctx {
        __le32 reserved[36];
 };
 
+/* The iwarp storm context of Xstorm */
 struct xstorm_iwarp_conn_st_ctx {
        __le32 reserved[44];
 };
 
-struct xstorm_iwarp_conn_ag_ctx {
+struct e4_xstorm_iwarp_conn_ag_ctx {
        u8 reserved0;
        u8 state;
        u8 flags0;
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT    0
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT    1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT    2
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT    3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT    4
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK        0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT       5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT    6
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT    7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK          0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT         4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK     0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT    5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK          0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT         6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK          0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT         7
        u8 flags1;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT    0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT    1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT   2
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT   3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT   4
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT   5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT   6
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK   0x1
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT  7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK                          0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT                         0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK                          0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT                         1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK                         0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT                                2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK                         0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT                                3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK                         0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT                                4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK                         0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT                                5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK                         0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT                                6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK        0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
        u8 flags2;
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK      0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT     0
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK      0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT     2
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK      0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT     4
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK   0x3
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT  6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK                   0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT                  0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK                   0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT                  2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK                   0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT                  4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK                0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT       6
        u8 flags3;
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK      0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT     0
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK      0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT     2
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK      0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT     4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK      0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT     6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK   0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT  0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK   0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT  2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK   0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT  4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK   0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT  6
        u8 flags4;
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK      0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT     0
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK      0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT     2
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK     0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT    4
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK     0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT    6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK   0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT  0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK   0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT  2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK  0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK  0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
        u8 flags5;
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK     0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT    0
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK     0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT    2
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK      0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT     4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK     0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT    6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK          0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT         0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK          0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT         2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK   0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT  4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK          0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT         6
        u8 flags6;
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK   0x3
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT  0
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK     0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT    2
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK     0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT    4
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT        6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK        0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK                          0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT                         2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK                          0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT                         4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK                      0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT                     6
        u8 flags7;
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT        0
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT        2
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK        0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT       4
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT   6
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT   7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK      0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT     0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK      0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT     2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK     0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT    4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK         0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT                6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK         0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT                7
        u8 flags8;
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT   0
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK        0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT       1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT   2
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT   3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT   4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT   5
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT   6
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT   7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK                 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT                        0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK     0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT    1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK                 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT                        2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK                 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT                        3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK                 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT                        4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK                 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT                        5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK                 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT                        6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK                 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT                        7
        u8 flags9;
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK   0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT  0
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK   0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT  1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK   0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT  2
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK   0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT  3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK   0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT  4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK   0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT  5
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK        0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK   0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT  7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK                                0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT                       0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK                                0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT                       1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK                                0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT                       2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK                                0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT                       3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK                        0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT               4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK                                0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT                       5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK                                0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT                       7
        u8 flags10;
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK   0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT  0
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK      0x1
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT     1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK      0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT     2
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK      0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT     3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT    4
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK   0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT  5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT    7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK                        0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT               0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK           0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT          1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK           0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT          2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK           0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT          3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK          0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT         4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK                        0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT               5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK               0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT              6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
        u8 flags11;
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT   0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK        0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT       2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT    6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK  0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT        0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK       0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT      1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK     0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT    2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK       0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT      3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK       0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT      4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK       0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT      5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK       0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT      7
        u8 flags12;
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT    0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT        1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT    2
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT    3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT        4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT        5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT        6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT        7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK              0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT             1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK          0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT         2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK          0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT         3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK      0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT     4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK              0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT             5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK              0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT             6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK              0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT             7
        u8 flags13;
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT   0
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK      0x1
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT     1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT    2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT        3
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT    4
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT    5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT    6
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT    7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT        0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK   0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT  1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK              0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT             3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK          0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT         4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK          0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT         6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK          0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT         7
        u8 flags14;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT   0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT   1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK    0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT   2
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT    3
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT    4
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK     0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT    5
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK     0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT    6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK         0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT                0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK         0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT                1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK         0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT                2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK  0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK          0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT         6
        u8 byte2;
        __le16 physical_q0;
        __le16 physical_q1;
@@ -8897,89 +9496,89 @@ struct xstorm_iwarp_conn_ag_ctx {
        __le32 reg17;
 };
 
-struct tstorm_iwarp_conn_ag_ctx {
+struct e4_tstorm_iwarp_conn_ag_ctx {
        u8 reserved0;
        u8 state;
        u8 flags0;
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT    0
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK     0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT    1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK     0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT    2
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK     0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT    3
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK     0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT    4
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK       0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT      5
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK      0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT     6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK          0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT         1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK          0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT         2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK  0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK          0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT         4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK    0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT   5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK           0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT          6
        u8 flags1;
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK       0x3
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT      0
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK   0x3
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT  2
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK   0x3
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT  4
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK      0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT     6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK            0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT           0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK                0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT       2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK                0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT       4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK                   0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT                  6
        u8 flags2;
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK      0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT     0
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK      0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT     2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK      0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT     4
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK      0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT     6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK   0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT  0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK   0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT  2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK   0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT  4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK   0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT  6
        u8 flags3;
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT        0
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK  0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT   4
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK    0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT   5
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK        0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT       6
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK        0x1
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT       7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK       0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT      2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK                         0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT                                4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK                 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT                        5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK             0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT            6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK             0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT            7
        u8 flags4;
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK    0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT   0
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK    0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT   1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK    0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT   2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK    0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT   3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK    0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT   4
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK      0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT     5
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK       0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK                         0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT                                0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK                         0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT                                1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK                         0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT                                2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK                         0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT                                3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK                         0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT                                4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define        E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK    0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT   6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK                       0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT                      7
        u8 flags5;
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT        5
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK               0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT              0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK               0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT              1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK               0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT              2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK               0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT              3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK               0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT              4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK      0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT     5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK               0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT              6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK               0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT              7
        __le32 reg0;
        __le32 reg1;
        __le32 unaligned_nxt_seq;
@@ -9001,51 +9600,56 @@ struct tstorm_iwarp_conn_ag_ctx {
        __le32 last_hq_sequence;
 };
 
+/* The iwarp storm context of Tstorm */
 struct tstorm_iwarp_conn_st_ctx {
        __le32 reserved[60];
 };
 
+/* The iwarp storm context of Mstorm */
 struct mstorm_iwarp_conn_st_ctx {
        __le32 reserved[32];
 };
 
+/* The iwarp storm context of Ustorm */
 struct ustorm_iwarp_conn_st_ctx {
        __le32 reserved[24];
 };
 
-struct iwarp_conn_context {
+/* iwarp connection context */
+struct e4_iwarp_conn_context {
        struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
        struct regpair ystorm_st_padding[2];
        struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
        struct regpair pstorm_st_padding[2];
        struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
        struct regpair xstorm_st_padding[2];
-       struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
-       struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+       struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+       struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
        struct timers_context timer_context;
-       struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+       struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
        struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
        struct regpair tstorm_st_padding[2];
        struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
        struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
 };
 
+/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
 struct iwarp_create_qp_ramrod_data {
        u8 flags;
 #define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK   0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT        1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK    0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT   2
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK    0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT   3
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK     0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT    4
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK       0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT      5
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK     0x3
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT    6
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT  0
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK         0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT                1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK            0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT           2
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK            0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT           3
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK             0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT            4
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK               0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT              5
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK             0x3
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT            6
        u8 reserved1;
        __le16 pd;
        __le16 sq_num_pages;
@@ -9061,6 +9665,7 @@ struct iwarp_create_qp_ramrod_data {
        u8 reserved2[6];
 };
 
+/* iWARP completion queue types */
 enum iwarp_eqe_async_opcode {
        IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE,
        IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED,
@@ -9083,6 +9688,7 @@ struct iwarp_eqe_data_tcp_async_completion {
        u8 reserved[5];
 };
 
+/* iWARP completion queue types */
 enum iwarp_eqe_sync_opcode {
        IWARP_EVENT_TYPE_TCP_OFFLOAD =
        11,
@@ -9095,6 +9701,7 @@ enum iwarp_eqe_sync_opcode {
        MAX_IWARP_EQE_SYNC_OPCODE
 };
 
+/* iWARP EQE completion status */
 enum iwarp_fw_return_code {
        IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
        IWARP_CONN_ERROR_TCP_CONNECTION_RST,
@@ -9125,54 +9732,60 @@ enum iwarp_fw_return_code {
        MAX_IWARP_FW_RETURN_CODE
 };
 
+/* unaligned opaque data received from LL2 */
 struct iwarp_init_func_params {
        u8 ll2_ooo_q_index;
        u8 reserved1[7];
 };
 
+/* iwarp func init ramrod data */
 struct iwarp_init_func_ramrod_data {
        struct rdma_init_func_ramrod_data rdma;
        struct tcp_init_params tcp;
        struct iwarp_init_func_params iwarp;
 };
 
+/* iWARP QP - possible states to transition to */
 enum iwarp_modify_qp_new_state_type {
        IWARP_MODIFY_QP_STATE_CLOSING = 1,
-       IWARP_MODIFY_QP_STATE_ERROR =
-       2,
+       IWARP_MODIFY_QP_STATE_ERROR = 2,
        MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
 };
 
+/* iwarp modify qp responder ramrod data */
 struct iwarp_modify_qp_ramrod_data {
        __le16 transition_to_state;
        __le16 flags;
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK    0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT   0
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK    0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT   1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK     0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT    2
-#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK        0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK            0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT           0
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK            0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT           1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK             0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT            2
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK                0x1
 #define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT       3
 #define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK       0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK      0x7FF
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT     5
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT      4
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK              0x7FF
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT             5
        __le32 reserved3[3];
        __le32 reserved4[8];
 };
 
+/* MPA params for Enhanced mode */
 struct mpa_rq_params {
        __le32 ird;
        __le32 ord;
 };
 
+/* MPA host Address-Len for private data */
 struct mpa_ulp_buffer {
        struct regpair addr;
        __le16 len;
        __le16 reserved[3];
 };
 
+/* iWARP MPA offload params common to Basic and Enhanced modes */
 struct mpa_outgoing_params {
        u8 crc_needed;
        u8 reject;
@@ -9181,6 +9794,9 @@ struct mpa_outgoing_params {
        struct mpa_ulp_buffer outgoing_ulp_buffer;
 };
 
+/* iWARP MPA offload params passed by driver to FW in MPA Offload Request
+ * Ramrod.
+ */
 struct iwarp_mpa_offload_ramrod_data {
        struct mpa_outgoing_params common;
        __le32 tcp_cid;
@@ -9188,18 +9804,20 @@ struct iwarp_mpa_offload_ramrod_data {
        u8 tcp_connect_side;
        u8 rtr_pref;
 #define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK       0x7
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK   0x1F
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT  3
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT      0
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK           0x1F
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT          3
        u8 reserved2;
        struct mpa_ulp_buffer incoming_ulp_buffer;
        struct regpair async_eqe_output_buf;
        struct regpair handle_for_async;
        struct regpair shared_queue_addr;
+       __le16 rcv_wnd;
        u8 stats_counter_id;
-       u8 reserved3[15];
+       u8 reserved3[13];
 };
 
+/* iWARP TCP connection offload params passed by driver to FW */
 struct iwarp_offload_params {
        struct mpa_ulp_buffer incoming_ulp_buffer;
        struct regpair async_eqe_output_buf;
@@ -9211,22 +9829,24 @@ struct iwarp_offload_params {
        u8 reserved[10];
 };
 
+/* iWARP query QP output params */
 struct iwarp_query_qp_output_params {
        __le32 flags;
 #define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK    0x1
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT   0
 #define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK    0x7FFFFFFF
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT   1
        u8 reserved1[4];
 };
 
+/* iWARP query QP ramrod data */
 struct iwarp_query_qp_ramrod_data {
        struct regpair output_params_addr;
 };
 
+/* iWARP Ramrod Command IDs */
 enum iwarp_ramrod_cmd_id {
-       IWARP_RAMROD_CMD_ID_TCP_OFFLOAD =
-       11,
+       IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
        IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
        IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
        IWARP_RAMROD_CMD_ID_CREATE_QP,
@@ -9236,22 +9856,28 @@ enum iwarp_ramrod_cmd_id {
        MAX_IWARP_RAMROD_CMD_ID
 };
 
+/* Per PF iWARP retransmit path statistics */
 struct iwarp_rxmit_stats_drv {
        struct regpair tx_go_to_slow_start_event_cnt;
        struct regpair tx_fast_retransmit_event_cnt;
 };
 
+/* iWARP and TCP connection offload params passed by driver to FW in iWARP
+ * offload ramrod.
+ */
 struct iwarp_tcp_offload_ramrod_data {
        struct iwarp_offload_params iwarp;
        struct tcp_offload_params_opt2 tcp;
 };
 
+/* iWARP MPA negotiation types */
 enum mpa_negotiation_mode {
        MPA_NEGOTIATION_TYPE_BASIC = 1,
        MPA_NEGOTIATION_TYPE_ENHANCED = 2,
        MAX_MPA_NEGOTIATION_MODE
 };
 
+/* iWARP MPA Enhanced mode RTR types */
 enum mpa_rtr_type {
        MPA_RTR_TYPE_NONE = 0,
        MPA_RTR_TYPE_ZERO_SEND = 1,
@@ -9264,113 +9890,114 @@ enum mpa_rtr_type {
        MAX_MPA_RTR_TYPE
 };
 
+/* unaligned opaque data received from LL2 */
 struct unaligned_opaque_data {
        __le16 first_mpa_offset;
        u8 tcp_payload_offset;
        u8 flags;
 #define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK  0x1
-#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK   0x1
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT  1
-#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK    0x3F
-#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT   2
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK           0x1
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT          1
+#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK                    0x3F
+#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT                   2
        __le32 cid;
 };
 
-struct mstorm_iwarp_conn_ag_ctx {
+struct e4_mstorm_iwarp_conn_ag_ctx {
        u8 reserved;
        u8 state;
        u8 flags0;
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT    0
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT        2
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK          0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT         0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK                  0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT                 1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK      0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT     2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK                   0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT                  4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK                   0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT                  6
        u8 flags1;
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK      0x1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK      0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT     6
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK   0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT  0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK                 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT                        1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK                 0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT                        2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK               0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT              3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK               0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT              4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK               0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT              5
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK           0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT          6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK               0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT              7
        __le16 rcq_cons;
        __le16 rcq_cons_th;
        __le32 reg0;
        __le32 reg1;
 };
 
-struct ustorm_iwarp_conn_ag_ctx {
+struct e4_ustorm_iwarp_conn_ag_ctx {
        u8 reserved;
        u8 byte1;
        u8 flags0;
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT    0
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK          0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT         1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK           0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT          2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK           0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT          4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK           0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT          6
        u8 flags1;
-#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK     0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT    2
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK        0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT       4
-#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK           0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT          0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK  0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK     0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT    4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK           0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT          6
        u8 flags2;
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK  0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK     0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT    5
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT        7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK                 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT                        0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK                 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT                        1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK                 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT                        2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK                 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT                        3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK       0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT      4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK          0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT         5
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK                 0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT                        6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK              0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT             7
        u8 flags3;
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK    0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT   0
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK         0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT                0
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK       0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT      1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK       0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT      2
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK       0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT      3
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK       0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT      4
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK       0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT      5
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK       0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT      6
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK       0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT      7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -9383,37 +10010,37 @@ struct ustorm_iwarp_conn_ag_ctx {
        __le16 word3;
 };
 
-struct ystorm_iwarp_conn_ag_ctx {
+struct e4_ystorm_iwarp_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK  0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK  0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK   0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT  2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK   0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT  4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK   0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT  6
        u8 flags1;
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK         0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT                0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK         0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT                1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK         0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT                2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK       0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK       0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK       0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK       0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK       0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT      7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -9427,6 +10054,7 @@ struct ystorm_iwarp_conn_ag_ctx {
        __le32 reg3;
 };
 
+/* The fcoe storm context of Ystorm */
 struct ystorm_fcoe_conn_st_ctx {
        u8 func_mode;
        u8 cos;
@@ -9442,45 +10070,49 @@ struct ystorm_fcoe_conn_st_ctx {
        struct regpair reserved;
        __le16 min_frame_size;
        u8 protection_info_flags;
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK  0x1
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK               0x1
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT              1
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK           0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT          2
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK                0x1
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT       0
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK                     0x1
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT                    1
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK                 0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT                        2
        u8 dst_protection_per_mss;
        u8 src_protection_per_mss;
        u8 ptu_log_page_size;
        u8 flags;
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK     0x1
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT    0
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK     0x1
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT    1
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT               2
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK   0x1
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT  0
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK   0x1
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT  1
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK              0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT             2
        u8 fcp_xfer_size;
 };
 
+/* FCoE 16-bits vlan structure */
 struct fcoe_vlan_fields {
        __le16 fields;
-#define FCOE_VLAN_FIELDS_VID_MASK  0xFFF
-#define FCOE_VLAN_FIELDS_VID_SHIFT 0
-#define FCOE_VLAN_FIELDS_CLI_MASK  0x1
-#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
-#define FCOE_VLAN_FIELDS_PRI_MASK  0x7
-#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+#define FCOE_VLAN_FIELDS_VID_MASK      0xFFF
+#define FCOE_VLAN_FIELDS_VID_SHIFT     0
+#define FCOE_VLAN_FIELDS_CLI_MASK      0x1
+#define FCOE_VLAN_FIELDS_CLI_SHIFT     12
+#define FCOE_VLAN_FIELDS_PRI_MASK      0x7
+#define FCOE_VLAN_FIELDS_PRI_SHIFT     13
 };
 
+/* FCoE 16-bits vlan union */
 union fcoe_vlan_field_union {
        struct fcoe_vlan_fields fields;
        __le16 val;
 };
 
+/* FCoE 16-bits vlan, vif union */
 union fcoe_vlan_vif_field_union {
        union fcoe_vlan_field_union vlan;
        __le16 vif;
 };
 
+/* Ethernet context section */
 struct pstorm_fcoe_eth_context_section {
        u8 remote_addr_3;
        u8 remote_addr_2;
@@ -9500,6 +10132,7 @@ struct pstorm_fcoe_eth_context_section {
        __le16 inner_eth_type;
 };
 
+/* The fcoe storm context of Pstorm */
 struct pstorm_fcoe_conn_st_ctx {
        u8 func_mode;
        u8 cos;
@@ -9513,16 +10146,18 @@ struct pstorm_fcoe_conn_st_ctx {
        u8 sid_1;
        u8 sid_0;
        u8 flags;
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK          0x1
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT         0
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK  0x1
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK     0x1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT    2
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK     0x1
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT    3
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK            0xF
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT           4
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK                        0x1
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT               0
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK                0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT       1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK           0x1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT          2
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK           0x1
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT          3
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK          0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT         4
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK                  0x7
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT                 5
        u8 did_2;
        u8 did_1;
        u8 did_0;
@@ -9532,6 +10167,7 @@ struct pstorm_fcoe_conn_st_ctx {
        u8 reserved1;
 };
 
+/* The fcoe storm context of Xstorm */
 struct xstorm_fcoe_conn_st_ctx {
        u8 func_mode;
        u8 src_mac_index;
@@ -9539,16 +10175,16 @@ struct xstorm_fcoe_conn_st_ctx {
        u8 cached_wqes_avail;
        __le16 stat_ram_addr;
        u8 flags;
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK             0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT            0
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK         0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT        1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK    0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT   2
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK      0x3
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT     3
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                    0x7
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT                   5
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK               0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT              0
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK           0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT          1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK      0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT     2
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK                0x3
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT       3
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                      0x7
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT                     5
        u8 cached_wqes_offset;
        u8 reserved2;
        u8 eth_hdr_size;
@@ -9574,18 +10210,18 @@ struct xstorm_fcoe_conn_st_ctx {
        u8 fcp_cmd_byte_credit;
        u8 fcp_rsp_byte_credit;
        __le16 protection_info;
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK         0x1
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT        0
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK      0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT     1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK                   0x1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT                  2
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK      0x1
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT     3
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK               0xF
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT              4
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK  0xFF
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK           0x1
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT          0
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK                0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT       1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK                     0x1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT                    2
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK                0x1
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT       3
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK                 0xF
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT                        4
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK    0xFF
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT   8
        __le16 xferq_pbl_next_index;
        __le16 page_size;
        u8 mid_seq;
@@ -9594,216 +10230,216 @@ struct xstorm_fcoe_conn_st_ctx {
        struct fcoe_wqe cached_wqes[16];
 };
 
-struct xstorm_fcoe_conn_ag_ctx {
+struct e4_xstorm_fcoe_conn_ag_ctx {
        u8 reserved0;
-       u8 fcoe_state;
+       u8 state;
        u8 flags0;
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT      0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT         1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT         2
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT      3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT         4
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT         5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT         6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT         7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK   0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT  0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK      0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT     1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK      0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT     2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK   0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT  3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK      0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT     4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK      0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT     5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK      0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT     6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK      0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT     7
        u8 flags1;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT         0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT         1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT         2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT             3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT             4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT             5
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT             6
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT             7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK      0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT     0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK      0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT     1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK      0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT     2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK          0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT         3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK          0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT         4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK          0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT         5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK          0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT         6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK          0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT         7
        u8 flags2;
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT               0
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT               2
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT               4
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT               6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT   0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT   2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT   4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK    0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT   6
        u8 flags3;
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT               0
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT               2
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT               4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT               6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK    0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT   0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK    0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT   2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK    0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT   4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK    0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT   6
        u8 flags4;
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT               0
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT               2
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT              4
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT              6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK    0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT   0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK    0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT   2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK   0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT  4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK   0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT  6
        u8 flags5;
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT              0
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT              2
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT              4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT              6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK   0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT  0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK   0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT  2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK   0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT  4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK   0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT  6
        u8 flags6;
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT              0
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT              2
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT              4
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK              0x3
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT             6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK   0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT  0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK   0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT  2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK   0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT  4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK  0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
        u8 flags7;
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK           0x3
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT          0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK         0x3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT        2
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK          0x3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT         4
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT             6
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT             7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK       0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT      0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK     0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT    2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK      0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT     4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK          0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT         6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK          0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT         7
        u8 flags8;
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT             0
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT             1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT             2
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT             3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT             4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT             5
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT             6
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT             7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
        u8 flags9;
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT            0
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT            1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT            2
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT            3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT            4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT            5
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT            6
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT            7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT        0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT        1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT        2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT        3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT        4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT        5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT        6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT        7
        u8 flags10;
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT            0
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT          1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK        0x1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT       2
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT        3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT      4
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT            5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT        6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT        7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK         0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT                0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK       0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT      1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK    0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT   2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK     0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT    3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK   0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT  4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK         0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT                5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK     0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT    6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK     0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT    7
        u8 flags11;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT        0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT        1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT        2
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK            0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT           3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK            0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT           4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK            0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT           5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT      6
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK  0x1
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK             0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT            0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK             0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT            1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK             0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT            2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK                        0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT               3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK                        0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT               4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK                        0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT               5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK           0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT          6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK      0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT     7
        u8 flags12;
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK     0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT    0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT          1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT      2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT      3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT          4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT          5
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT          6
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT          7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT        0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK       0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT      1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK   0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT  2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK   0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT  3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK       0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT      4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK       0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT      5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK       0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT      6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK       0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT      7
        u8 flags13;
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK  0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT          1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT      2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT      3
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT      4
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT      5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT      6
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT      7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK      0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT     0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK               0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT              1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK           0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT          2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK           0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT          3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK           0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT          4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK           0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT          5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK           0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT          6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK           0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT          7
        u8 flags14;
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT             0
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT             1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT             2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT             3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT             4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT             5
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT              6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK  0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK   0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT  6
        u8 byte2;
        __le16 physical_q0;
        __le16 word1;
@@ -9831,6 +10467,7 @@ struct xstorm_fcoe_conn_ag_ctx {
        __le32 reg8;
 };
 
+/* The fcoe storm context of Ustorm */
 struct ustorm_fcoe_conn_st_ctx {
        struct regpair respq_pbl_addr;
        __le16 num_pages_in_pbl;
@@ -9840,150 +10477,150 @@ struct ustorm_fcoe_conn_st_ctx {
        u8 reserved[2];
 };
 
-struct tstorm_fcoe_conn_ag_ctx {
+struct e4_tstorm_fcoe_conn_ag_ctx {
        u8 reserved0;
-       u8 fcoe_state;
+       u8 state;
        u8 flags0;
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK          0x1
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT         0
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT                 1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT                 2
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT                 3
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT                 4
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT                 5
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK        0x3
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT       6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK   0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT  0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK           0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT          1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK           0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT          2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK           0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT          3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK           0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT          4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK           0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT          5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT        6
        u8 flags1;
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK           0x3
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT          0
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT                  2
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK     0x3
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT    4
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT                  6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK            0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT           0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK                    0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT                   2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK      0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT     4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK                    0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT                   6
        u8 flags2;
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT                  0
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT                  2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT                  4
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT                  6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK    0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT   0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK    0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT   2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK    0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT   4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK    0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT   6
        u8 flags3;
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT                  0
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK                  0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT                 2
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK     0x1
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT    4
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK        0x1
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT       5
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT                6
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK                    0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT                   0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK                   0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT                  2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK      0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT     4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK         0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT                5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK                  0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT                 6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK   0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT  7
        u8 flags4;
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT                0
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT                1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT                2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT                3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT                4
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT                5
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK                0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT               6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT              7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK          0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT         0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK          0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT         1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK          0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT         2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK          0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT         3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK          0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT         4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK          0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT         5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK         0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT                6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK                0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT       7
        u8 flags5;
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT              0
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT              1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT              2
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT              3
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT              4
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT              5
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT              6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT              7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT       0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT       1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT       2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT       3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK                0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT       4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK                0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT       5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK                0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT       6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK                0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT       7
        __le32 reg0;
        __le32 reg1;
 };
 
-struct ustorm_fcoe_conn_ag_ctx {
+struct e4_ustorm_fcoe_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK   0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT  0
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK   0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT  1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT   2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT   4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT   6
        u8 flags1;
-#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK    0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT   0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK    0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT   2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK    0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT   4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK    0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT   6
        u8 flags2;
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK          0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT         0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK          0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT         1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK          0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT         2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK          0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT         3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK          0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT         4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK          0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT         5
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK          0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT         6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK                0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT       7
        u8 flags3;
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT       0
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT       1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT       2
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT       3
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK                0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT       4
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK                0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT       5
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK                0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT       6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK                0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT       7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -9996,72 +10633,76 @@ struct ustorm_fcoe_conn_ag_ctx {
        __le16 word3;
 };
 
+/* The fcoe storm context of Tstorm */
 struct tstorm_fcoe_conn_st_ctx {
        __le16 stat_ram_addr;
        __le16 rx_max_fc_payload_len;
        __le16 e_d_tov_val;
        u8 flags;
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK   0x1
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT  0
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK  0x1
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK     0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT    2
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK       0x1
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT      0
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK      0x1
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT     1
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK         0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT                2
        u8 timers_cleanup_invocation_cnt;
        __le32 reserved1[2];
-       __le32 dst_mac_address_bytes0to3;
-       __le16 dst_mac_address_bytes4to5;
+       __le32 dst_mac_address_bytes_0_to_3;
+       __le16 dst_mac_address_bytes_4_to_5;
        __le16 ramrod_echo;
        u8 flags1;
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK          0x3
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT         0
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK      0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT     2
-       u8 q_relative_offset;
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK      0x3
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT     0
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK  0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
+       u8 cq_relative_offset;
+       u8 cmdq_relative_offset;
        u8 bdq_resource_id;
-       u8 reserved0[5];
+       u8 reserved0[4];
 };
 
-struct mstorm_fcoe_conn_ag_ctx {
+struct e4_mstorm_fcoe_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK   0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT  0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK   0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT  1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT   2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT   4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT   6
        u8 flags1;
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK          0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT         0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK          0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT         1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK          0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT         2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK                0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT       3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT       4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT       5
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT       6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT       7
        __le16 word0;
        __le16 word1;
        __le32 reg0;
        __le32 reg1;
 };
 
+/* Fast path part of the fcoe storm context of Mstorm */
 struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
        __le16 xfer_prod;
-       __le16 reserved1;
+       u8 num_cqs;
+       u8 reserved1;
        u8 protection_info;
 #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK  0x1
 #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0
@@ -10073,6 +10714,7 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
        u8 reserved2[2];
 };
 
+/* Non fast path part of the fcoe storm context of Mstorm */
 struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
        __le16 conn_id;
        __le16 stat_ram_addr;
@@ -10088,37 +10730,46 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
        struct regpair reserved2[3];
 };
 
+/* The fcoe storm context of Mstorm */
 struct mstorm_fcoe_conn_st_ctx {
        struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp;
        struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp;
 };
 
-struct fcoe_conn_context {
+/* fcoe connection context */
+struct e4_fcoe_conn_context {
        struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
        struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
        struct regpair pstorm_st_padding[2];
        struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
-       struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+       struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
        struct regpair xstorm_ag_padding[6];
        struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
        struct regpair ustorm_st_padding[2];
-       struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+       struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
        struct regpair tstorm_ag_padding[2];
        struct timers_context timer_context;
-       struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+       struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
        struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
-       struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+       struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
        struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
 };
 
+/* FCoE connection offload params passed by driver to FW in FCoE offload
+ * ramrod.
+ */
 struct fcoe_conn_offload_ramrod_params {
        struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
 };
 
+/* FCoE connection terminate params passed by driver to FW in FCoE terminate
+ * conn ramrod.
+ */
 struct fcoe_conn_terminate_ramrod_params {
        struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
 };
 
+/* FCoE event type */
 enum fcoe_event_type {
        FCOE_EVENT_INIT_FUNC,
        FCOE_EVENT_DESTROY_FUNC,
@@ -10129,10 +10780,12 @@ enum fcoe_event_type {
        MAX_FCOE_EVENT_TYPE
 };
 
+/* FCoE init params passed by driver to FW in FCoE init ramrod */
 struct fcoe_init_ramrod_params {
        struct fcoe_init_func_ramrod_data init_ramrod_data;
 };
 
+/* FCoE ramrod Command IDs */
 enum fcoe_ramrod_cmd_id {
        FCOE_RAMROD_CMD_ID_INIT_FUNC,
        FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
@@ -10142,41 +10795,44 @@ enum fcoe_ramrod_cmd_id {
        MAX_FCOE_RAMROD_CMD_ID
 };
 
+/* FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod.
+ */
 struct fcoe_stat_ramrod_params {
        struct fcoe_stat_ramrod_data stat_ramrod_data;
 };
 
-struct ystorm_fcoe_conn_ag_ctx {
+struct e4_ystorm_fcoe_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK   0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT  0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK   0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT  1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK    0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT   2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK    0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT   4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK    0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT   6
        u8 flags1;
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK          0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT         0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK          0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT         1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK          0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT         2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK                0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT       3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK                0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT       4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK                0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT       5
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK                0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT       6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK                0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT       7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -10190,230 +10846,233 @@ struct ystorm_fcoe_conn_ag_ctx {
        __le32 reg3;
 };
 
+/* The iscsi storm connection context of Ystorm */
 struct ystorm_iscsi_conn_st_ctx {
-       __le32 reserved[4];
+       __le32 reserved[8];
 };
 
+/* Combined iSCSI and TCP storm connection of Pstorm */
 struct pstorm_iscsi_tcp_conn_st_ctx {
        __le32 tcp[32];
        __le32 iscsi[4];
 };
 
+/* The combined tcp and iscsi storm context of Xstorm */
 struct xstorm_iscsi_tcp_conn_st_ctx {
-       __le32 reserved_iscsi[40];
        __le32 reserved_tcp[4];
+       __le32 reserved_iscsi[44];
 };
 
-struct xstorm_iscsi_conn_ag_ctx {
+struct e4_xstorm_iscsi_conn_ag_ctx {
        u8 cdu_validation;
        u8 state;
        u8 flags0;
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT               0
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT               1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK                   0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT                  2
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT               3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK                   0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT                  5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT                       6
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT                       7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK  0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK     0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT    2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK  0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT         4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK     0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT    5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT         6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT         7
        u8 flags1;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT                       0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT                       1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT                      2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT                      3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT                      4
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT                      5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT                      6
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT                7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT         0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT         1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK         0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT                2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK         0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT                3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK         0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT                4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK         0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT                5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK         0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT                6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK   0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT  7
        u8 flags2;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT                        0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT                        2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT                        4
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK              0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT             6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                   0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT                  0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK                   0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT                  2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK                   0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT                  4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK                0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT       6
        u8 flags3;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT                        0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT                        2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT                        4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT                        6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK   0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT  0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK   0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT  2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK   0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT  4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK   0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT  6
        u8 flags4;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT                        0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT                        2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT                       6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK   0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT  0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK   0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT  2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK  0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK  0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
        u8 flags5;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT                       0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT                       2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK     0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT    6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK                          0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT                         0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK                          0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT                         2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK                          0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT                         4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK       0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT      6
        u8 flags6;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT                       0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT                       2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK                    0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT                   6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK          0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT         0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK          0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT         2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK          0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT         4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK      0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT     6
        u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK      0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT        0
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK      0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT        2
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK                   0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT                  4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT                      6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT                      7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK   0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT  0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK   0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT  2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK             0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT            4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT                        6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT                        7
        u8 flags8;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT                      0
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK           0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT          1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT                      2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT                      3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT                      4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT                      5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT                      6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT                      7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT                        0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK     0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT    1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT                        2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT                        3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT                        4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT                        5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT                        6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT                        7
        u8 flags9;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT                     0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT                     1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT                     2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT                     3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT                     4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK  0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT                     6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT                     7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK                                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT                       0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK                                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT                       1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK                                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT                       2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK                                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT                       3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK                                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT                       4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK    0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT   5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK                                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT                       6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK                                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT                       7
        u8 flags10;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT                     0
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT                1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK   0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT     2
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK   0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT     3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT               4
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT       5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT                    6
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT   7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK                                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT                       0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK                   0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT                  1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT       2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT       3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK                  0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT                 4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT         5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK                       0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT                      6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK      0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT     7
        u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT              0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT                    1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK                   0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT                  2
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT                    3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT                    4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT                    5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT               6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT                    7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT        0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK       0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT      1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK     0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT    2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK       0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT      3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK       0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT      4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK       0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT      5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK  0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK       0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT      7
        u8 flags12;
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK              0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT             0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT                   1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT               2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT               3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT                   4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT                   5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT                   6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT                   7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT       0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK              0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT             1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT         2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT         3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK              0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT             4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK              0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT             5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK              0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT             6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK              0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT             7
        u8 flags13;
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK            0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT           0
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK              0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT             1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT               2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT               3
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT               4
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT               5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT               6
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT               7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK      0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT     0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK                0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT       1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT         2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT         3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT         4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT         5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT         6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK          0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT         7
        u8 flags14;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT                      0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT                      1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT                      2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT                      3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT                      4
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK             0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT            5
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK           0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT          6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT                        0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT                        1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT                        2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT                        3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK                 0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT                        4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK       0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT      5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK     0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT    6
        u8 byte2;
        __le16 physical_q0;
        __le16 physical_q1;
@@ -10449,7 +11108,7 @@ struct xstorm_iscsi_conn_ag_ctx {
        u8 byte13;
        u8 byte14;
        u8 byte15;
-       u8 ereserved;
+       u8 e5_reserved;
        __le16 word11;
        __le32 reg10;
        __le32 reg11;
@@ -10461,89 +11120,89 @@ struct xstorm_iscsi_conn_ag_ctx {
        __le32 reg17;
 };
 
-struct tstorm_iscsi_conn_ag_ctx {
+struct e4_tstorm_iscsi_conn_ag_ctx {
        u8 reserved0;
        u8 state;
        u8 flags0;
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT      0
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT              1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT              2
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT              3
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT              4
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT              5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT               6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK          0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT         1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK          0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT         2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK          0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT         3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK          0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT         4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK          0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT         5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK           0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT          6
        u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK     0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT      0
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK     0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT      2
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK     0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT    4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT               6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK          0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT         0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK          0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT         2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK                0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT       4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                   0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT                  6
        u8 flags2;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT               0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT               2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT               4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT               6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK   0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT  0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK   0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT  2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK   0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT  4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK   0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT  6
        u8 flags3;
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK           0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT          0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK               0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT              2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT             4
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK  0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT   5
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK  0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT   6
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK  0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK              0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT             0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK                  0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT                 2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK                 0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT                        4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK       0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT      5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK       0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT      6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK     0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT    7
        u8 flags4;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT             0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT             1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT             2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT             3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT             4
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK        0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT       5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK             0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT            6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT           7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK         0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT                0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK         0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT                1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK         0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT                2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK         0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT                3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK         0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT                4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK   0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT  5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK                0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT       6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK       0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT      7
        u8 flags5;
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT           0
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT           1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT           2
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT           3
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT           4
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT           5
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT           6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT           7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK       0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT      0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK       0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT      1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK       0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT      2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK       0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT      3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK       0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT      4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK       0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT      5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK       0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT      6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK       0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT      7
        __le32 reg0;
        __le32 reg1;
        __le32 reg2;
@@ -10558,63 +11217,63 @@ struct tstorm_iscsi_conn_ag_ctx {
        __le16 word0;
 };
 
-struct ustorm_iscsi_conn_ag_ctx {
+struct e4_ustorm_iscsi_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK  0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK  0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK   0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT  2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK   0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT  4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK   0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT  6
        u8 flags1;
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK   0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT  0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK   0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT  2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK   0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT  4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK   0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT  6
        u8 flags2;
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK         0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT                0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK         0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT                1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK         0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT                2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK         0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT                3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK         0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT                4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK         0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT                5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK         0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT                6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK       0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT      7
        u8 flags3;
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK       0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT      0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK       0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT      1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK       0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT      2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK       0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT      3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK       0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT      4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK       0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT      5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK       0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT      6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK       0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT      7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -10627,113 +11286,117 @@ struct ustorm_iscsi_conn_ag_ctx {
        __le16 word3;
 };
 
+/* The iscsi storm connection context of Tstorm */
 struct tstorm_iscsi_conn_st_ctx {
-       __le32 reserved[40];
+       __le32 reserved[44];
 };
 
-struct mstorm_iscsi_conn_ag_ctx {
+struct e4_mstorm_iscsi_conn_ag_ctx {
        u8 reserved;
        u8 state;
        u8 flags0;
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK  0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK  0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK   0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT  2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK   0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT  4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK   0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT  6
        u8 flags1;
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK         0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT                0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK         0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT                1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK         0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT                2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK       0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK       0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK       0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK       0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK       0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT      7
        __le16 word0;
        __le16 word1;
        __le32 reg0;
        __le32 reg1;
 };
 
+/* Combined iSCSI and TCP storm connection of Mstorm */
 struct mstorm_iscsi_tcp_conn_st_ctx {
        __le32 reserved_tcp[20];
-       __le32 reserved_iscsi[8];
+       __le32 reserved_iscsi[12];
 };
 
+/* The iscsi storm context of Ustorm */
 struct ustorm_iscsi_conn_st_ctx {
        __le32 reserved[52];
 };
 
-struct iscsi_conn_context {
+/* iscsi connection context */
+struct e4_iscsi_conn_context {
        struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
-       struct regpair ystorm_st_padding[2];
        struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
        struct regpair pstorm_st_padding[2];
        struct pb_context xpb2_context;
        struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
        struct regpair xstorm_st_padding[2];
-       struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
-       struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+       struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+       struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
        struct regpair tstorm_ag_padding[2];
        struct timers_context timer_context;
-       struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+       struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
        struct pb_context upb_context;
        struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
        struct regpair tstorm_st_padding[2];
-       struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+       struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
        struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
        struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
 };
 
+/* iSCSI init params passed by driver to FW in iSCSI init ramrod */
 struct iscsi_init_ramrod_params {
        struct iscsi_spe_func_init iscsi_init_spe;
        struct tcp_init_params tcp_init;
 };
 
-struct ystorm_iscsi_conn_ag_ctx {
+struct e4_ystorm_iscsi_conn_ag_ctx {
        u8 byte0;
        u8 byte1;
        u8 flags0;
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK  0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK  0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK   0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT  2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK   0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT  4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK   0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT  6
        u8 flags1;
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK         0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT                0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK         0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT                1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK         0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT                2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK       0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK       0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK       0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK       0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK       0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT      7
        u8 byte2;
        u8 byte3;
        __le16 word0;
@@ -11613,7 +12276,7 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_DCBX_NOTIFY_MASK          0x000000FF
 #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT         3
 
-#define DRV_MB_PARAM_NVM_LEN_SHIFT             24
+#define DRV_MB_PARAM_NVM_LEN_OFFSET            24
 
 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT   0
 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK    0x000000FF
index a05feb3..fca2dbd 100644 (file)
@@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, const char *phase)
+{
+       u32 size = PAGE_SIZE / 2, val;
+       struct qed_dmae_params params;
+       int rc = 0;
+       dma_addr_t p_phys;
+       void *p_virt;
+       u32 *p_tmp;
+
+       p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                   2 * size, &p_phys, GFP_KERNEL);
+       if (!p_virt) {
+               DP_NOTICE(p_hwfn,
+                         "DMAE sanity [%s]: failed to allocate memory\n",
+                         phase);
+               return -ENOMEM;
+       }
+
+       /* Fill the bottom half of the allocated memory with a known pattern */
+       for (p_tmp = (u32 *)p_virt;
+            p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
+               /* Save the address itself as the value */
+               val = (u32)(uintptr_t)p_tmp;
+               *p_tmp = val;
+       }
+
+       /* Zero the top half of the allocated memory */
+       memset((u8 *)p_virt + size, 0, size);
+
+       DP_VERBOSE(p_hwfn,
+                  QED_MSG_SP,
+                  "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
+                  phase,
+                  (u64)p_phys,
+                  p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
+
+       memset(&params, 0, sizeof(params));
+       rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+                               size / 4 /* size_in_dwords */, &params);
+       if (rc) {
+               DP_NOTICE(p_hwfn,
+                         "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
+                         phase, rc);
+               goto out;
+       }
+
+       /* Verify that the top half of the allocated memory has the pattern */
+       for (p_tmp = (u32 *)((u8 *)p_virt + size);
+            p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
+               /* The corresponding address in the bottom half */
+               val = (u32)(uintptr_t)p_tmp - size;
+
+               if (*p_tmp != val) {
+                       DP_NOTICE(p_hwfn,
+                                 "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+                                 phase,
+                                 (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
+                                 p_tmp, *p_tmp, val);
+                       rc = -EINVAL;
+                       goto out;
+               }
+       }
+
+out:
+       dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
+       return rc;
+}
index f2505c6..8db2839 100644 (file)
@@ -299,4 +299,8 @@ union qed_qm_pq_params {
 
 int qed_init_fw_data(struct qed_dev *cdev,
                     const u8 *fw_data);
+
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, const char *phase);
+
 #endif
index b069ad0..18fb506 100644 (file)
@@ -31,6 +31,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/crc8.h>
 #include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include "qed_init_ops.h"
 #include "qed_reg_addr.h"
 
+#define CDU_VALIDATION_DEFAULT_CFG     61
+
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+       {400, 336, 352, 304, 304, 384, 416, 352},       /* region 3 offsets */
+       {528, 496, 416, 448, 448, 512, 544, 480},       /* region 4 offsets */
+       {608, 544, 496, 512, 576, 592, 624, 560}        /* region 5 offsets */
+};
+
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+       {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
+};
+
 /* General constants */
 #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
                                                        QM_PQ_ELEMENT_SIZE, \
                                                        0x1000) : 0)
 #define QM_PQ_SIZE_256B(pq_size)       (pq_size ? DIV_ROUND_UP(pq_size, \
                                                                0x100) - 1 : 0)
-#define QM_INVALID_PQ_ID                        0xffff
+#define QM_INVALID_PQ_ID               0xffff
+
 /* Feature enable */
-#define QM_BYPASS_EN                            1
-#define QM_BYTE_CRD_EN                          1
+#define QM_BYPASS_EN   1
+#define QM_BYTE_CRD_EN 1
+
 /* Other PQ constants */
-#define QM_OTHER_PQS_PER_PF                     4
+#define QM_OTHER_PQS_PER_PF    4
+
 /* WFQ constants */
-#define QM_WFQ_UPPER_BOUND             62500000
-#define QM_WFQ_VP_PQ_VOQ_SHIFT          0
-#define QM_WFQ_VP_PQ_PF_SHIFT           5
-#define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL                      43750000
+
+/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_WFQ_UPPER_BOUND     62500000
+
+/* Bit  of VOQ in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+
+/* Bit  of PF in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT       5
+
+/* 0x9000 = 4*9*1024 */
+#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+
+/* Max WFQ increment value is 0.7 * upper bound */
+#define QM_WFQ_MAX_INC_VAL     ((QM_WFQ_UPPER_BOUND * 7) / 10)
 
 /* RL constants */
-#define QM_RL_UPPER_BOUND                       62500000
-#define QM_RL_PERIOD                            5               /* in us */
-#define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD)
-#define QM_RL_MAX_INC_VAL                       43750000
-#define QM_RL_INC_VAL(rate)            max_t(u32,      \
-                                             (u32)(((rate ? rate : \
-                                                     1000000) *    \
-                                                    QM_RL_PERIOD * \
-                                                    101) / (8 * 100)), 1)
+
+/* Period in us */
+#define QM_RL_PERIOD   5
+
+/* Period in 25MHz cycles */
+#define QM_RL_PERIOD_CLK_25M   (25 * QM_RL_PERIOD)
+
+/* RL increment value - rate is specified in mbps */
+#define QM_RL_INC_VAL(rate) ({ \
+       typeof(rate) __rate = (rate); \
+       max_t(u32, \
+             (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
+                   (8 * 100)), \
+             1); })
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND   62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL   ((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mpbs */
+#define QM_VP_RL_UPPER_BOUND(speed)    ((u32)max_t(u32, \
+                                                   QM_RL_INC_VAL(speed), \
+                                                   9700 + 1000))
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed)    QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED   (QM_VP_RL_UPPER_BOUND(10000) - 1)
+
 /* AFullOprtnstcCrdMask constants */
-#define QM_OPPOR_LINE_VOQ_DEF           1
-#define QM_OPPOR_FW_STOP_DEF            0
-#define QM_OPPOR_PQ_EMPTY_DEF           1
+#define QM_OPPOR_LINE_VOQ_DEF  1
+#define QM_OPPOR_FW_STOP_DEF   0
+#define QM_OPPOR_PQ_EMPTY_DEF  1
+
 /* Command Queue constants */
-#define PBF_CMDQ_PURE_LB_LINES                          150
-#define PBF_CMDQ_LINES_RT_OFFSET(voq)           (               \
-               PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
-               (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET -      \
-                PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
-#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)       (            \
-               PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
-               (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET -      \
-                PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
-#define QM_VOQ_LINE_CRD(pbf_cmd_lines)          ((((pbf_cmd_lines) - \
-                                                  4) *              \
-                                                 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES 150
+
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO   8
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+       (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+        (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+               PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+       (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+        (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+               PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+       ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
 /* BTB: blocks constants (block size = 256B) */
-#define BTB_JUMBO_PKT_BLOCKS            38
-#define BTB_HEADROOM_BLOCKS                     BTB_JUMBO_PKT_BLOCKS
-#define BTB_PURE_LB_FACTOR                      10
-#define BTB_PURE_LB_RATIO                       7
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS   38
+
+/* Headroom per-port */
+#define BTB_HEADROOM_BLOCKS    BTB_JUMBO_PKT_BLOCKS
+#define BTB_PURE_LB_FACTOR     10
+
+/* Factored (hence really 0.7) */
+#define BTB_PURE_LB_RATIO      7
+
 /* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH           32
-#define QM_STOP_CMD_ADDR                2
-#define QM_STOP_CMD_STRUCT_SIZE         2
-#define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
-#define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
-#define QM_STOP_CMD_PAUSE_MASK_MASK     -1
-#define QM_STOP_CMD_GROUP_ID_OFFSET     1
-#define QM_STOP_CMD_GROUP_ID_SHIFT      16
-#define QM_STOP_CMD_GROUP_ID_MASK       15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET      1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT       24
-#define QM_STOP_CMD_PQ_TYPE_MASK        1
-#define QM_STOP_CMD_MAX_POLL_COUNT      100
-#define QM_STOP_CMD_POLL_PERIOD_US      500
+#define QM_STOP_PQ_MASK_WIDTH          32
+#define QM_STOP_CMD_ADDR               2
+#define QM_STOP_CMD_STRUCT_SIZE                2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET  0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT   0
+#define QM_STOP_CMD_PAUSE_MASK_MASK    -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET    1
+#define QM_STOP_CMD_GROUP_ID_SHIFT     16
+#define QM_STOP_CMD_GROUP_ID_MASK      15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET     1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT      24
+#define QM_STOP_CMD_PQ_TYPE_MASK       1
+#define QM_STOP_CMD_MAX_POLL_COUNT     100
+#define QM_STOP_CMD_POLL_PERIOD_US     500
 
 /* QM command macros */
-#define QM_CMD_STRUCT_SIZE(cmd)                        cmd ## \
-       _STRUCT_SIZE
-#define QM_CMD_SET_FIELD(var, cmd, field,                                \
-                        value)        SET_FIELD(var[cmd ## _ ## field ## \
-                                                    _OFFSET],            \
-                                                cmd ## _ ## field,       \
-                                                value)
-/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) *    \
-                                                  (max_phys_tcs_per_port) + \
-                                                  (tc))
-#define LB_VOQ(port)                           ( \
-               MAX_PHYS_VOQS + (port))
-#define VOQ(port, tc, max_phy_tcs_pr_port)     \
-       ((tc) <         \
-        LB_TC ? PHYS_VOQ(port,         \
-                         tc,                    \
-                         max_phy_tcs_pr_port) \
-               : LB_VOQ(port))
+#define QM_CMD_STRUCT_SIZE(cmd)        cmd ## _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+       SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
+                 cmd ## _ ## field, \
+                 value)
+
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+                         ext_voq, wrr) \
+       do { \
+               typeof(map) __map; \
+               memset(&__map, 0, sizeof(__map)); \
+               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
+               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
+                         rl_valid); \
+               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
+                         vp_pq_id); \
+               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
+               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \
+               SET_FIELD(__map.reg, \
+                         QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \
+               STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
+                            *((u32 *)&__map)); \
+               (map) = __map; \
+       } while (0)
+
+#define WRITE_PQ_INFO_TO_RAM   1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
+       (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
+       ((rl_valid) << 22) | ((rl) << 24))
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+       (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
+
 /******************** INTERNAL IMPLEMENTATION *********************/
+
+/* Returns the external VOQ number */
+static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
+                         u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
+{
+       if (tc == PURE_LB_TC)
+               return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
+       else
+               return port_id * max_phys_tcs_per_port + tc;
+}
+
 /* Prepare PF RL enable/disable runtime init values */
 static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
 {
        STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
        if (pf_rl_en) {
+               u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+               u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
                /* Enable RLs for all VOQs */
-               STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
-                            (1 << MAX_NUM_VOQS) - 1);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLPFVOQENABLE_RT_OFFSET,
+                            (u32)voq_bit_mask);
+               if (num_ext_voqs >= 32)
+                       STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+                                    (u32)(voq_bit_mask >> 32));
+
                /* Write RL period */
                STORE_RT_REG(p_hwfn,
                             QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
@@ -147,7 +243,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
                if (QM_BYPASS_EN)
                        STORE_RT_REG(p_hwfn,
                                     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
-                                    QM_RL_UPPER_BOUND);
+                                    QM_PF_RL_UPPER_BOUND);
        }
 }
 
@@ -181,7 +277,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
                if (QM_BYPASS_EN)
                        STORE_RT_REG(p_hwfn,
                                     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
-                                    QM_RL_UPPER_BOUND);
+                                    QM_VP_RL_BYPASS_THRESH_SPEED);
        }
 }
 
@@ -202,15 +298,15 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
  * the specified VOQ.
  */
 static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
-                                      u8 voq, u16 cmdq_lines)
+                                      u8 ext_voq, u16 cmdq_lines)
 {
-       u32 qm_line_crd;
+       u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
 
-       qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
-       OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+       OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
                         (u32)cmdq_lines);
-       STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
-       STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+       STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+                    qm_line_crd);
+       STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
                     qm_line_crd);
 }
 
@@ -221,43 +317,52 @@ static void qed_cmdq_lines_rt_init(
        u8 max_phys_tcs_per_port,
        struct init_qm_port_params port_params[MAX_NUM_PORTS])
 {
-       u8 tc, voq, port_id, num_tcs_in_port;
+       u8 tc, ext_voq, port_id, num_tcs_in_port;
+       u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+
+       /* Clear PBF lines of all VOQs */
+       for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+               STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
 
-       /* Clear PBF lines for all VOQs */
-       for (voq = 0; voq < MAX_NUM_VOQS; voq++)
-               STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
        for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
-               if (port_params[port_id].active) {
-                       u16 phys_lines, phys_lines_per_tc;
-
-                       /* find #lines to divide between active phys TCs */
-                       phys_lines = port_params[port_id].num_pbf_cmd_lines -
-                                    PBF_CMDQ_PURE_LB_LINES;
-                       /* find #lines per active physical TC */
-                       num_tcs_in_port = 0;
-                       for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
-                               if (((port_params[port_id].active_phys_tcs >>
-                                     tc) & 0x1) == 1)
-                                       num_tcs_in_port++;
-                       }
+               u16 phys_lines, phys_lines_per_tc;
 
-                       phys_lines_per_tc = phys_lines / num_tcs_in_port;
-                       /* init registers per active TC */
-                       for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
-                               if (((port_params[port_id].active_phys_tcs >>
-                                     tc) & 0x1) != 1)
-                                       continue;
+               if (!port_params[port_id].active)
+                       continue;
 
-                               voq = PHYS_VOQ(port_id, tc,
-                                              max_phys_tcs_per_port);
-                               qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
-                                                          phys_lines_per_tc);
-                       }
+               /* Find number of command queue lines to divide between the
+                * active physical TCs. In E5, 1/8 of the lines are reserved.
+                * the lines for pure LB TC are subtracted.
+                */
+               phys_lines = port_params[port_id].num_pbf_cmd_lines;
+               phys_lines -= PBF_CMDQ_PURE_LB_LINES;
+
+               /* Find #lines per active physical TC */
+               num_tcs_in_port = 0;
+               for (tc = 0; tc < max_phys_tcs_per_port; tc++)
+                       if (((port_params[port_id].active_phys_tcs >>
+                             tc) & 0x1) == 1)
+                               num_tcs_in_port++;
+               phys_lines_per_tc = phys_lines / num_tcs_in_port;
 
-                       /* init registers for pure LB TC */
-                       qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
-                                                  PBF_CMDQ_PURE_LB_LINES);
+               /* Init registers per active TC */
+               for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+                       ext_voq = qed_get_ext_voq(p_hwfn,
+                                                 port_id,
+                                                 tc, max_phys_tcs_per_port);
+                       if (((port_params[port_id].active_phys_tcs >>
+                             tc) & 0x1) == 1)
+                               qed_cmdq_lines_voq_rt_init(p_hwfn,
+                                                          ext_voq,
+                                                          phys_lines_per_tc);
                }
+
+               /* Init registers for pure LB TC */
+               ext_voq = qed_get_ext_voq(p_hwfn,
+                                         port_id,
+                                         PURE_LB_TC, max_phys_tcs_per_port);
+               qed_cmdq_lines_voq_rt_init(p_hwfn,
+                                          ext_voq, PBF_CMDQ_PURE_LB_LINES);
        }
 }
 
@@ -268,11 +373,9 @@ static void qed_btb_blocks_rt_init(
        struct init_qm_port_params port_params[MAX_NUM_PORTS])
 {
        u32 usable_blocks, pure_lb_blocks, phys_blocks;
-       u8 tc, voq, port_id, num_tcs_in_port;
+       u8 tc, ext_voq, port_id, num_tcs_in_port;
 
        for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
-               u32 temp;
-
                if (!port_params[port_id].active)
                        continue;
 
@@ -280,13 +383,14 @@ static void qed_btb_blocks_rt_init(
                usable_blocks = port_params[port_id].num_btb_blocks -
                                BTB_HEADROOM_BLOCKS;
 
-               /* find blocks per physical TC */
+               /* Find blocks per physical TC. Use factor to avoid floating
+                * arithmethic.
+                */
                num_tcs_in_port = 0;
-               for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+               for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
                        if (((port_params[port_id].active_phys_tcs >>
                              tc) & 0x1) == 1)
                                num_tcs_in_port++;
-               }
 
                pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
                                 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
@@ -299,47 +403,55 @@ static void qed_btb_blocks_rt_init(
                /* Init physical TCs */
                for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
                        if (((port_params[port_id].active_phys_tcs >>
-                             tc) & 0x1) != 1)
-                               continue;
-
-                       voq = PHYS_VOQ(port_id, tc,
-                                      max_phys_tcs_per_port);
-                       STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
-                                    phys_blocks);
+                             tc) & 0x1) == 1) {
+                               ext_voq =
+                                       qed_get_ext_voq(p_hwfn,
+                                                       port_id,
+                                                       tc,
+                                                       max_phys_tcs_per_port);
+                               STORE_RT_REG(p_hwfn,
+                                            PBF_BTB_GUARANTEED_RT_OFFSET
+                                            (ext_voq), phys_blocks);
+                       }
                }
 
                /* Init pure LB TC */
-               temp = LB_VOQ(port_id);
-               STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+               ext_voq = qed_get_ext_voq(p_hwfn,
+                                         port_id,
+                                         PURE_LB_TC, max_phys_tcs_per_port);
+               STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
                             pure_lb_blocks);
        }
 }
 
 /* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(
-       struct qed_hwfn *p_hwfn,
-       struct qed_ptt *p_ptt,
-       struct qed_qm_pf_rt_init_params *p_params,
-       u32 base_mem_addr_4kb)
+static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 struct qed_qm_pf_rt_init_params *p_params,
+                                 u32 base_mem_addr_4kb)
 {
-       struct init_qm_vport_params *vport_params = p_params->vport_params;
-       u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
-       u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
-       u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
-                           QM_PF_QUEUE_GROUP_SIZE;
-       u16 i, pq_id, pq_group;
-
-       /* A bit per Tx PQ indicating if the PQ is associated with a VF */
        u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+       struct init_qm_vport_params *vport_params = p_params->vport_params;
        u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
-       u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
-       u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
-       u32 mem_addr_4kb = base_mem_addr_4kb;
+       u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
+       struct init_qm_pq_params *pq_params = p_params->pq_params;
+       u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+       num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+
+       first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+       last_pq_group = (p_params->start_pq + num_pqs - 1) /
+                       QM_PF_QUEUE_GROUP_SIZE;
+
+       pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+       vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+       mem_addr_4kb = base_mem_addr_4kb;
 
        /* Set mapping from PQ group to PF */
        for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
                STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
                             (u32)(p_params->pf_id));
+
        /* Set PQ sizes */
        STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
                     QM_PQ_SIZE_256B(p_params->num_pf_cids));
@@ -348,58 +460,82 @@ static void qed_tx_pq_map_rt_init(
 
        /* Go over all Tx PQs */
        for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
-               u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
-                            p_params->max_phys_tcs_per_port);
-               bool is_vf_pq = (i >= p_params->num_pf_pqs);
-               struct qm_rf_pq_map tx_pq_map;
-
-               bool rl_valid = p_params->pq_params[i].rl_valid &&
-                               (p_params->pq_params[i].vport_id <
-                                MAX_QM_GLOBAL_RLS);
+               u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
+               u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+               struct qm_rf_pq_map_e4 tx_pq_map;
+               bool is_vf_pq, rl_valid;
+               u16 *p_first_tx_pq_id;
+
+               ext_voq = qed_get_ext_voq(p_hwfn,
+                                         p_params->port_id,
+                                         tc_id,
+                                         p_params->max_phys_tcs_per_port);
+               is_vf_pq = (i >= p_params->num_pf_pqs);
+               rl_valid = pq_params[i].rl_valid &&
+                          pq_params[i].vport_id < max_qm_global_rls;
 
                /* Update first Tx PQ of VPORT/TC */
-               u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
-                                   p_params->start_vport;
-               u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
-               u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+               vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
+               p_first_tx_pq_id =
+                   &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
+               if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
+                       u32 map_val =
+                               (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+                               (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
 
-               if (first_tx_pq_id == QM_INVALID_PQ_ID) {
                        /* Create new VP PQ */
-                       pq_ids[p_params->pq_params[i].tc_id] = pq_id;
-                       first_tx_pq_id = pq_id;
+                       *p_first_tx_pq_id = pq_id;
 
                        /* Map VP PQ to VOQ and PF */
                        STORE_RT_REG(p_hwfn,
                                     QM_REG_WFQVPMAP_RT_OFFSET +
-                                    first_tx_pq_id,
-                                    (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
-                                    (p_params->pf_id <<
-                                     QM_WFQ_VP_PQ_PF_SHIFT));
+                                    *p_first_tx_pq_id,
+                                    map_val);
                }
 
-               if (p_params->pq_params[i].rl_valid && !rl_valid)
+               /* Check RL ID */
+               if (pq_params[i].rl_valid && pq_params[i].vport_id >=
+                   max_qm_global_rls)
                        DP_NOTICE(p_hwfn,
-                                 "Invalid VPORT ID for rate limiter configuration");
-               /* Fill PQ map entry */
-               memset(&tx_pq_map, 0, sizeof(tx_pq_map));
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
-               SET_FIELD(tx_pq_map.reg,
-                         QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
-                         rl_valid ?
-                         p_params->pq_params[i].vport_id : 0);
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
-               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
-                         p_params->pq_params[i].wrr_group);
-               /* Write PQ map entry to CAM */
-               STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
-                            *((u32 *)&tx_pq_map));
-               /* Set base address */
+                                 "Invalid VPORT ID for rate limiter configuration\n");
+
+               /* Prepare PQ map entry */
+               QM_INIT_TX_PQ_MAP(p_hwfn,
+                                 tx_pq_map,
+                                 E4,
+                                 pq_id,
+                                 rl_valid ? 1 : 0,
+                                 *p_first_tx_pq_id,
+                                 rl_valid ? pq_params[i].vport_id : 0,
+                                 ext_voq, pq_params[i].wrr_group);
+
+               /* Set PQ base address */
                STORE_RT_REG(p_hwfn,
                             QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
                             mem_addr_4kb);
 
+               /* Clear PQ pointer table entry (64 bit) */
+               if (p_params->is_pf_loading)
+                       for (j = 0; j < 2; j++)
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_PTRTBLTX_RT_OFFSET +
+                                            (pq_id * 2) + j, 0);
+
+               /* Write PQ info to RAM */
+               if (WRITE_PQ_INFO_TO_RAM != 0) {
+                       u32 pq_info = 0;
+
+                       pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
+                                                 p_params->pf_id,
+                                                 tc_id,
+                                                 p_params->port_id,
+                                                 rl_valid ? 1 : 0,
+                                                 rl_valid ?
+                                                 pq_params[i].vport_id : 0);
+                       qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+                              pq_info);
+               }
+
                /* If VF PQ, add indication to PQ VF mask */
                if (is_vf_pq) {
                        tx_pq_vf_mask[pq_id /
@@ -421,16 +557,16 @@ static void qed_tx_pq_map_rt_init(
 
 /* Prepare Other PQ mapping runtime init values for the specified PF */
 static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
-                                    u8 port_id,
                                     u8 pf_id,
+                                    bool is_pf_loading,
                                     u32 num_pf_cids,
                                     u32 num_tids, u32 base_mem_addr_4kb)
 {
        u32 pq_size, pq_mem_4kb, mem_addr_4kb;
-       u16 i, pq_id, pq_group;
+       u16 i, j, pq_id, pq_group;
 
-       /* a single other PQ group is used in each PF,
-        * where PQ group i is used in PF i.
+       /* A single other PQ group is used in each PF, where PQ group i is used
+        * in PF i.
         */
        pq_group = pf_id;
        pq_size = num_pf_cids + num_tids;
@@ -440,16 +576,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
        /* Map PQ group to PF */
        STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
                     (u32)(pf_id));
+
        /* Set PQ sizes */
        STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
                     QM_PQ_SIZE_256B(pq_size));
 
-       /* Set base address */
        for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
             i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+               /* Set PQ base address */
                STORE_RT_REG(p_hwfn,
                             QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
                             mem_addr_4kb);
+
+               /* Clear PQ pointer table entry */
+               if (is_pf_loading)
+                       for (j = 0; j < 2; j++)
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_PTRTBLOTHER_RT_OFFSET +
+                                            (pq_id * 2) + j, 0);
+
                mem_addr_4kb += pq_mem_4kb;
        }
 }
@@ -461,16 +606,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
                              struct qed_qm_pf_rt_init_params *p_params)
 {
        u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
-       u32 crd_reg_offset;
-       u32 inc_val;
+       struct init_qm_pq_params *pq_params = p_params->pq_params;
+       u32 inc_val, crd_reg_offset;
+       u8 ext_voq;
        u16 i;
 
-       if (p_params->pf_id < MAX_NUM_PFS_BB)
-               crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
-       else
-               crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
-       crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
-
        inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
        if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
@@ -478,19 +618,26 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
        }
 
        for (i = 0; i < num_tx_pqs; i++) {
-               u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
-                            p_params->max_phys_tcs_per_port);
-
+               ext_voq = qed_get_ext_voq(p_hwfn,
+                                         p_params->port_id,
+                                         pq_params[i].tc_id,
+                                         p_params->max_phys_tcs_per_port);
+               crd_reg_offset =
+                       (p_params->pf_id < MAX_NUM_PFS_BB ?
+                        QM_REG_WFQPFCRD_RT_OFFSET :
+                        QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+                       ext_voq * MAX_NUM_PFS_BB +
+                       (p_params->pf_id % MAX_NUM_PFS_BB);
                OVERWRITE_RT_REG(p_hwfn,
-                                crd_reg_offset + voq * MAX_NUM_PFS_BB,
-                                QM_WFQ_CRD_REG_SIGN_BIT);
+                                crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
        }
 
        STORE_RT_REG(p_hwfn,
                     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
-                    QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+                    QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
                     inc_val);
+
        return 0;
 }
 
@@ -501,15 +648,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
 {
        u32 inc_val = QM_RL_INC_VAL(pf_rl);
 
-       if (inc_val > QM_RL_MAX_INC_VAL) {
+       if (inc_val > QM_PF_RL_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
                return -1;
        }
-       STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
-                    QM_RL_CRD_REG_SIGN_BIT);
-       STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
-                    QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+
+       STORE_RT_REG(p_hwfn,
+                    QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+                    (u32)QM_RL_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn,
+                    QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+                    QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
        return 0;
 }
 
@@ -520,12 +671,12 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
                              u8 num_vports,
                              struct init_qm_vport_params *vport_params)
 {
+       u16 vport_pq_id;
        u32 inc_val;
        u8 tc, i;
 
        /* Go over all PF VPORTs */
        for (i = 0; i < num_vports; i++) {
-
                if (!vport_params[i].vport_wfq)
                        continue;
 
@@ -536,17 +687,14 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
                        return -1;
                }
 
-               /* each VPORT can have several VPORT PQ IDs for
-                * different TCs
-                */
+               /* Each VPORT can have several VPORT PQ IDs for various TCs */
                for (tc = 0; tc < NUM_OF_TCS; tc++) {
-                       u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
-
+                       vport_pq_id = vport_params[i].first_tx_pq_id[tc];
                        if (vport_pq_id != QM_INVALID_PQ_ID) {
                                STORE_RT_REG(p_hwfn,
                                             QM_REG_WFQVPCRD_RT_OFFSET +
                                             vport_pq_id,
-                                            QM_WFQ_CRD_REG_SIGN_BIT);
+                                            (u32)QM_WFQ_CRD_REG_SIGN_BIT);
                                STORE_RT_REG(p_hwfn,
                                             QM_REG_WFQVPWEIGHT_RT_OFFSET +
                                             vport_pq_id, inc_val);
@@ -557,12 +705,17 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
 static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
                                u8 start_vport,
                                u8 num_vports,
+                               u32 link_speed,
                                struct init_qm_vport_params *vport_params)
 {
        u8 i, vport_id;
+       u32 inc_val;
 
        if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
                DP_NOTICE(p_hwfn,
@@ -572,22 +725,22 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
 
        /* Go over all PF VPORTs */
        for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
-               u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
-
-               if (inc_val > QM_RL_MAX_INC_VAL) {
+               inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
+                         vport_params[i].vport_rl :
+                         link_speed);
+               if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
                        DP_NOTICE(p_hwfn,
                                  "Invalid VPORT rate-limit configuration\n");
                        return -1;
                }
 
-               STORE_RT_REG(p_hwfn,
-                            QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
-                            QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+                            (u32)QM_RL_CRD_REG_SIGN_BIT);
                STORE_RT_REG(p_hwfn,
                             QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
-                            QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
-               STORE_RT_REG(p_hwfn,
-                            QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+                            QM_VP_RL_UPPER_BOUND(link_speed) |
+                            (u32)QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
                             inc_val);
        }
 
@@ -599,7 +752,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
 {
        u32 reg_val, i;
 
-       for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+       for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
             i++) {
                udelay(QM_STOP_CMD_POLL_PERIOD_US);
                reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
@@ -632,8 +785,8 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
 }
 
 /******************** INTERFACE IMPLEMENTATION *********************/
-u32 qed_qm_pf_mem_size(u8 pf_id,
-                      u32 num_pf_cids,
+
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
                       u32 num_vf_cids,
                       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
 {
@@ -642,11 +795,10 @@ u32 qed_qm_pf_mem_size(u8 pf_id,
               QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
 }
 
-int qed_qm_common_rt_init(
-       struct qed_hwfn *p_hwfn,
-       struct qed_qm_common_rt_init_params *p_params)
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+                         struct qed_qm_common_rt_init_params *p_params)
 {
-       /* init AFullOprtnstcCrdMask */
+       /* Init AFullOprtnstcCrdMask */
        u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
                    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
                   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
@@ -664,18 +816,31 @@ int qed_qm_common_rt_init(
                    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
 
        STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+
+       /* Enable/disable PF RL */
        qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+
+       /* Enable/disable PF WFQ */
        qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+
+       /* Enable/disable VPORT RL */
        qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+
+       /* Enable/disable VPORT WFQ */
        qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+
+       /* Init PBF CMDQ line credit */
        qed_cmdq_lines_rt_init(p_hwfn,
                               p_params->max_ports_per_engine,
                               p_params->max_phys_tcs_per_port,
                               p_params->port_params);
+
+       /* Init BTB blocks in PBF */
        qed_btb_blocks_rt_init(p_hwfn,
                               p_params->max_ports_per_engine,
                               p_params->max_phys_tcs_per_port,
                               p_params->port_params);
+
        return 0;
 }
 
@@ -695,24 +860,31 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
                        vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
 
        /* Map Other PQs (if any) */
-       qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
-                                p_params->num_pf_cids, p_params->num_tids, 0);
+       qed_other_pq_map_rt_init(p_hwfn,
+                                p_params->pf_id,
+                                p_params->is_pf_loading, p_params->num_pf_cids,
+                                p_params->num_tids, 0);
 
        /* Map Tx PQs */
        qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
 
+       /* Init PF WFQ */
        if (p_params->pf_wfq)
                if (qed_pf_wfq_rt_init(p_hwfn, p_params))
                        return -1;
 
+       /* Init PF RL */
        if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
                return -1;
 
+       /* Set VPORT WFQ */
        if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
                return -1;
 
+       /* Set VPORT RL */
        if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
-                                p_params->num_vports, vport_params))
+                                p_params->num_vports, p_params->link_speed,
+                                vport_params))
                return -1;
 
        return 0;
@@ -729,6 +901,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
        }
 
        qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
        return 0;
 }
 
@@ -737,14 +910,13 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
 {
        u32 inc_val = QM_RL_INC_VAL(pf_rl);
 
-       if (inc_val > QM_RL_MAX_INC_VAL) {
+       if (inc_val > QM_PF_RL_MAX_INC_VAL) {
                DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
                return -1;
        }
 
-       qed_wr(p_hwfn, p_ptt,
-              QM_REG_RLPFCRD + pf_id * 4,
-              QM_RL_CRD_REG_SIGN_BIT);
+       qed_wr(p_hwfn,
+              p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
        qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
 
        return 0;
@@ -767,33 +939,35 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
        for (tc = 0; tc < NUM_OF_TCS; tc++) {
                vport_pq_id = first_tx_pq_id[tc];
                if (vport_pq_id != QM_INVALID_PQ_ID)
-                       qed_wr(p_hwfn, p_ptt,
-                              QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
-                              inc_val);
+                       qed_wr(p_hwfn,
+                              p_ptt,
+                              QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
        }
 
        return 0;
 }
 
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
-                     struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+                     struct qed_ptt *p_ptt,
+                     u8 vport_id, u32 vport_rl, u32 link_speed)
 {
-       u32 inc_val = QM_RL_INC_VAL(vport_rl);
+       u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
 
-       if (vport_id >= MAX_QM_GLOBAL_RLS) {
+       if (vport_id >= max_qm_global_rls) {
                DP_NOTICE(p_hwfn,
                          "Invalid VPORT ID for rate limiter configuration\n");
                return -1;
        }
 
-       if (inc_val > QM_RL_MAX_INC_VAL) {
+       inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
+       if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
                DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
                return -1;
        }
 
-       qed_wr(p_hwfn, p_ptt,
-              QM_REG_RLGLBLCRD + vport_id * 4,
-              QM_RL_CRD_REG_SIGN_BIT);
+       qed_wr(p_hwfn,
+              p_ptt,
+              QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
        qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
 
        return 0;
@@ -805,23 +979,27 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
                          bool is_tx_pq, u16 start_pq, u16 num_pqs)
 {
        u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
-       u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+       u32 pq_mask = 0, last_pq, pq_id;
+
+       last_pq = start_pq + num_pqs - 1;
 
        /* Set command's PQ type */
        QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
 
+       /* Go over requested PQs */
        for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
                /* Set PQ bit in mask (stop command only) */
                if (!is_release_cmd)
-                       pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+                       pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));
 
                /* If last PQ or end of PQ mask, write command */
                if ((pq_id == last_pq) ||
                    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
                     (QM_STOP_PQ_MASK_WIDTH - 1))) {
-                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
-                                        PAUSE_MASK, pq_mask);
-                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+                       QM_CMD_SET_FIELD(cmd_arr,
+                                        QM_STOP_CMD, PAUSE_MASK, pq_mask);
+                       QM_CMD_SET_FIELD(cmd_arr,
+                                        QM_STOP_CMD,
                                         GROUP_ID,
                                         pq_id / QM_STOP_PQ_MASK_WIDTH);
                        if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
@@ -834,87 +1012,103 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
        return true;
 }
 
-static void
-qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
-{
-       if (enable)
-               set_bit(bit, var);
-       else
-               clear_bit(bit, var);
-}
 
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+       do { \
+               typeof(var) *__p_var = &(var); \
+               typeof(offset) __offset = offset; \
+               *__p_var = (*__p_var & ~BIT(__offset)) | \
+                          ((enable) ? BIT(__offset) : 0); \
+       } while (0)
 #define PRS_ETH_TUNN_FIC_FORMAT        -188897008
 
 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
                             struct qed_ptt *p_ptt, u16 dest_port)
 {
+       /* Update PRS register */
        qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+       /* Update NIG register */
        qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+       /* Update PBF register */
        qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
 }
 
 void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt, bool vxlan_enable)
 {
-       unsigned long reg_val = 0;
+       u32 reg_val;
        u8 shift;
 
+       /* Update PRS register */
        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
-
        if (reg_val)
-               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-                      PRS_ETH_TUNN_FIC_FORMAT);
+               qed_wr(p_hwfn,
+                      p_ptt,
+                      PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+                      (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+       /* Update NIG register */
        reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
        shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
        qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
 
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
-              vxlan_enable ? 1 : 0);
+       /* Update DORQ register */
+       qed_wr(p_hwfn,
+              p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
 }
 
-void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+                       struct qed_ptt *p_ptt,
                        bool eth_gre_enable, bool ip_gre_enable)
 {
-       unsigned long reg_val = 0;
+       u32 reg_val;
        u8 shift;
 
+       /* Update PRS register */
        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
        if (reg_val)
-               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-                      PRS_ETH_TUNN_FIC_FORMAT);
+               qed_wr(p_hwfn,
+                      p_ptt,
+                      PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+                      (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+       /* Update NIG register */
        reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
        shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
        shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
        qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
 
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
-              eth_gre_enable ? 1 : 0);
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
-              ip_gre_enable ? 1 : 0);
+       /* Update DORQ registers */
+       qed_wr(p_hwfn,
+              p_ptt,
+              DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
+       qed_wr(p_hwfn,
+              p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
 }
 
 void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt, u16 dest_port)
 {
+       /* Update PRS register */
        qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+
+       /* Update NIG register */
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+
+       /* Update PBF register */
        qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
 }
 
@@ -922,32 +1116,39 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt,
                           bool eth_geneve_enable, bool ip_geneve_enable)
 {
-       unsigned long reg_val = 0;
+       u32 reg_val;
        u8 shift;
 
+       /* Update PRS register */
        reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
        shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
-       qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
-
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
        qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
        if (reg_val)
-               qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-                      PRS_ETH_TUNN_FIC_FORMAT);
+               qed_wr(p_hwfn,
+                      p_ptt,
+                      PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+                      (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+       /* Update NIG register */
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
               eth_geneve_enable ? 1 : 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
 
-       /* EDPM with geneve tunnel not supported in BB_B0 */
+       /* EDPM with geneve tunnel not supported in BB */
        if (QED_IS_BB_B0(p_hwfn->cdev))
                return;
 
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+       /* Update DORQ registers */
+       qed_wr(p_hwfn,
+              p_ptt,
+              DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
               eth_geneve_enable ? 1 : 0);
-       qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+       qed_wr(p_hwfn,
+              p_ptt,
+              DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
               ip_geneve_enable ? 1 : 0);
 }
 
@@ -959,117 +1160,297 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 #define RAM_LINE_SIZE sizeof(u64)
 #define REG_SIZE sizeof(u32)
 
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
-                             struct qed_ptt *p_ptt, u16 pf_id)
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
 {
-       u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
-                     pf_id * RAM_LINE_SIZE;
-
-       /*stop using gft logic */
+       /* Disable gft search for PF */
        qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
-       qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+       /* Clean ram & cam for next gft session */
+
+       /* Zero camline */
        qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
-       qed_wr(p_hwfn, p_ptt, hw_addr, 0);
-       qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
+
+       /* Zero ramline */
+       qed_wr(p_hwfn,
+              p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
+       qed_wr(p_hwfn,
+              p_ptt,
+              PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+              0);
 }
 
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                            u16 pf_id, bool tcp, bool udp,
-                            bool ipv4, bool ipv6)
+void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       union gft_cam_line_union camline;
-       struct gft_ram_line ramline;
        u32 rfs_cm_hdr_event_id;
 
+       /* Set RFS event ID to be awakened i Tstorm By Prs */
        rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+       rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+                              PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+       rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+                              PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+       qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt,
+                   u16 pf_id,
+                   bool tcp,
+                   bool udp,
+                   bool ipv4, bool ipv6, enum gft_profile_type profile_type)
+{
+       u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
 
        if (!ipv6 && !ipv4)
                DP_NOTICE(p_hwfn,
-                         "set_rfs_mode_enable: must accept at least on of - ipv4 or ipv6");
+                         "gft_config: must accept at least on of - ipv4 or ipv6'\n");
        if (!tcp && !udp)
                DP_NOTICE(p_hwfn,
-                         "set_rfs_mode_enable: must accept at least on of - udp or tcp");
+                         "gft_config: must accept at least on of - udp or tcp\n");
+       if (profile_type >= MAX_GFT_PROFILE_TYPE)
+               DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");
 
-       rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
-                                       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
-       rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
-                                       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
-       qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+       /* Set RFS event ID to be awakened i Tstorm By Prs */
+       reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+                 PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+       reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+       qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
 
-       /* Configure Registers for RFS mode */
-       qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+       /* Do not load context only cid in PRS on match. */
        qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
-       camline.cam_line_mapped.camline = 0;
 
-       /* Cam line is now valid!! */
-       SET_FIELD(camline.cam_line_mapped.camline,
-                 GFT_CAM_LINE_MAPPED_VALID, 1);
+       /* Do not use tenant ID exist bit for gft search */
+       qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
 
-       /* filters are per PF!! */
-       SET_FIELD(camline.cam_line_mapped.camline,
+       /* Set Cam */
+       cam_line = 0;
+       SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
+
+       /* Filters are per PF!! */
+       SET_FIELD(cam_line,
                  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
                  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
-       SET_FIELD(camline.cam_line_mapped.camline,
-                 GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+       SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
        if (!(tcp && udp)) {
-               SET_FIELD(camline.cam_line_mapped.camline,
+               SET_FIELD(cam_line,
                          GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
                          GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
                if (tcp)
-                       SET_FIELD(camline.cam_line_mapped.camline,
+                       SET_FIELD(cam_line,
                                  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
                                  GFT_PROFILE_TCP_PROTOCOL);
                else
-                       SET_FIELD(camline.cam_line_mapped.camline,
+                       SET_FIELD(cam_line,
                                  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
                                  GFT_PROFILE_UDP_PROTOCOL);
        }
 
        if (!(ipv4 && ipv6)) {
-               SET_FIELD(camline.cam_line_mapped.camline,
-                         GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+               SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
                if (ipv4)
-                       SET_FIELD(camline.cam_line_mapped.camline,
+                       SET_FIELD(cam_line,
                                  GFT_CAM_LINE_MAPPED_IP_VERSION,
                                  GFT_PROFILE_IPV4);
                else
-                       SET_FIELD(camline.cam_line_mapped.camline,
+                       SET_FIELD(cam_line,
                                  GFT_CAM_LINE_MAPPED_IP_VERSION,
                                  GFT_PROFILE_IPV6);
        }
 
        /* Write characteristics to cam */
        qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
-              camline.cam_line_mapped.camline);
-       camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
-                                                PRS_REG_GFT_CAM +
-                                                CAM_LINE_SIZE * pf_id);
+              cam_line);
+       cam_line =
+           qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
 
        /* Write line to RAM - compare to filter 4 tuple */
-       ramline.lo = 0;
-       ramline.hi = 0;
-       SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
-       SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
-       SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
-       SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
-       SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
-       SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);
-
-       /* Each iteration write to reg */
-       qed_wr(p_hwfn, p_ptt,
+       ram_line_lo = 0;
+       ram_line_hi = 0;
+
+       if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+               SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+               SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+               SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+       } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+               SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+       } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+               SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+               SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+       }
+
+       qed_wr(p_hwfn,
+              p_ptt,
               PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
-              ramline.lo);
-       qed_wr(p_hwfn, p_ptt,
-              PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
-              ramline.hi);
+              ram_line_lo);
+       qed_wr(p_hwfn,
+              p_ptt,
+              PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+              ram_line_hi);
 
        /* Set default profile so that no filter match will happen */
-       qed_wr(p_hwfn, p_ptt,
-              PRS_REG_GFT_PROFILE_MASK_RAM +
-              RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
-              ramline.lo);
-       qed_wr(p_hwfn, p_ptt,
-              PRS_REG_GFT_PROFILE_MASK_RAM +
-              RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
-              ramline.hi);
+       qed_wr(p_hwfn,
+              p_ptt,
+              PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+              PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+       qed_wr(p_hwfn,
+              p_ptt,
+              PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+              PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+
+       /* Enable gft search */
+       qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+}
+
+DECLARE_CRC8_TABLE(cdu_crc8_table);
+
+/* Calculate and return CDU validation byte per connection type/region/cid */
+static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
+{
+       const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+       u8 crc, validation_byte = 0;
+       static u8 crc8_table_valid; /* automatically initialized to 0 */
+       u32 validation_string = 0;
+       u32 data_to_crc;
+
+       if (!crc8_table_valid) {
+               crc8_populate_msb(cdu_crc8_table, 0x07);
+               crc8_table_valid = 1;
+       }
+
+       /* The CRC is calculated on the String-to-compress:
+        * [31:8]  = {CID[31:20],CID[11:0]}
+        * [7:4]   = Region
+        * [3:0]   = Type
+        */
+       if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+               validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+       if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+               validation_string |= ((region & 0xF) << 4);
+
+       if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+               validation_string |= (conn_type & 0xF);
+
+       /* Convert to big-endian and calculate CRC8 */
+       data_to_crc = be32_to_cpu(validation_string);
+
+       crc = crc8(cdu_crc8_table,
+                  (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
+
+       /* The validation byte [7:0] is composed:
+        * for type A validation
+        * [7]          = active configuration bit
+        * [6:0]        = crc[6:0]
+        *
+        * for type B validation
+        * [7]          = active configuration bit
+        * [6:3]        = connection_type[3:0]
+        * [2:0]        = crc[2:0]
+        */
+       validation_byte |=
+           ((validation_cfg >>
+             CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+       if ((validation_cfg >>
+            CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+               validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+       else
+               validation_byte |= crc & 0x7F;
+
+       return validation_byte;
+}
+
+/* Calcualte and set validation bytes for session context */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+                                    u16 ctx_size, u8 ctx_type, u32 cid)
+{
+       u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+       p_ctx = (u8 * const)p_ctx_mem;
+       x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+       t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+       u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+       memset(p_ctx, 0, ctx_size);
+
+       *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
+       *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
+       *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calcualte and set validation bytes for task context */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+                                 u16 ctx_size, u8 ctx_type, u32 tid)
+{
+       u8 *p_ctx, *region1_val_ptr;
+
+       p_ctx = (u8 * const)p_ctx_mem;
+       region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+       memset(p_ctx, 0, ctx_size);
+
+       *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+       u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+       u8 x_val, t_val, u_val;
+
+       p_ctx = (u8 * const)p_ctx_mem;
+       x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+       t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+       u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+       x_val = *x_val_ptr;
+       t_val = *t_val_ptr;
+       u_val = *u_val_ptr;
+
+       memset(p_ctx, 0, ctx_size);
+
+       *x_val_ptr = x_val;
+       *t_val_ptr = t_val;
+       *u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+       u8 *p_ctx, *region1_val_ptr;
+       u8 region1_val;
+
+       p_ctx = (u8 * const)p_ctx_mem;
+       region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+       region1_val = *region1_val_ptr;
+
+       memset(p_ctx, 0, ctx_size);
+
+       *region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt)
+{
+       u32 ctx_validation;
+
+       /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
+       ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+       qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+       /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
+       ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+       qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+       /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
+       ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+       qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
 }
index e3f3688..3bb76da 100644 (file)
@@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 }
 
 /* init_ops callbacks entry point */
-static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
-                           struct qed_ptt *p_ptt,
-                           struct init_callback_op *p_cmd)
+static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt,
+                          struct init_callback_op *p_cmd)
 {
-       DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+       int rc;
+
+       switch (p_cmd->callback_id) {
+       case DMAE_READY_CB:
+               rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+               break;
+       default:
+               DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
+                         p_cmd->callback_id);
+               return -EINVAL;
+       }
+
+       return rc;
 }
 
 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
@@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
                        break;
 
                case INIT_OP_CALLBACK:
-                       qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+                       rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
                        break;
                }
 
index 719cdbf..d3eabcf 100644 (file)
@@ -59,10 +59,10 @@ struct qed_pi_info {
 };
 
 struct qed_sb_sp_info {
-       struct qed_sb_info      sb_info;
+       struct qed_sb_info sb_info;
 
        /* per protocol index data */
-       struct qed_pi_info      pi_info_arr[PIS_PER_SB];
+       struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
 };
 
 enum qed_attention_type {
@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
 #define ATTENTION_LENGTH_SHIFT          (4)
 #define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
                                         ATTENTION_LENGTH_SHIFT)
-#define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE                BIT(ATTENTION_LENGTH_SHIFT)
 #define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
 #define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
                                         ATTENTION_PARITY)
@@ -1313,7 +1313,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
        if (IS_VF(p_hwfn->cdev))
                return;
 
-       sb_offset = igu_sb_id * PIS_PER_SB;
+       sb_offset = igu_sb_id * PIS_PER_SB_E4;
        memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
 
        SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
index 5199634..54b4ee0 100644 (file)
@@ -197,7 +197,7 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 #define QED_SB_EVENT_MASK       0x0003
 
 #define SB_ALIGNED_SIZE(p_hwfn)        \
-       ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+       ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
 
 #define QED_SB_INVALID_IDX      0xffff
 
index 813c77c..c0d4a54 100644 (file)
 #include "qed_sriov.h"
 #include "qed_reg_addr.h"
 
-static int
-qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
-                     u8 fw_event_code,
-                     u16 echo, union event_ring_data *data, u8 fw_return_code)
-{
-       if (p_hwfn->p_iscsi_info->event_cb) {
-               struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
-
-               return p_iscsi->event_cb(p_iscsi->event_context,
-                                        fw_event_code, data);
-       } else {
-               DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
-               return -EINVAL;
-       }
-}
-
 struct qed_iscsi_conn {
        struct list_head list_entry;
        bool free_on_delete;
@@ -105,7 +89,7 @@ struct qed_iscsi_conn {
        u8 local_mac[6];
        u8 remote_mac[6];
        u16 vlan_id;
-       u8 tcp_flags;
+       u16 tcp_flags;
        u8 ip_version;
        u32 remote_ip[4];
        u32 local_ip[4];
@@ -122,7 +106,6 @@ struct qed_iscsi_conn {
        u32 ss_thresh;
        u16 srtt;
        u16 rtt_var;
-       u32 ts_time;
        u32 ts_recent;
        u32 ts_recent_age;
        u32 total_rt;
@@ -144,7 +127,6 @@ struct qed_iscsi_conn {
        u16 mss;
        u8 snd_wnd_scale;
        u8 rcv_wnd_scale;
-       u32 ts_ticks_per_second;
        u16 da_timeout_value;
        u8 ack_frequency;
 
@@ -162,6 +144,22 @@ struct qed_iscsi_conn {
 };
 
 static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+                     u8 fw_event_code,
+                     u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+       if (p_hwfn->p_iscsi_info->event_cb) {
+               struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+               return p_iscsi->event_cb(p_iscsi->event_context,
+                                        fw_event_code, data);
+       } else {
+               DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+               return -EINVAL;
+       }
+}
+
+static int
 qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_addr,
@@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
        p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
        p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
        p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
-       p_init->ooo_enable = p_params->ooo_enable;
        p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
                                  p_params->ll2_ooo_queue_id;
+
        p_init->func_params.log_page_size = p_params->log_page_size;
        val = p_params->num_tasks;
        p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -276,7 +274,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
        p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
        val = p_params->tx_sws_timer;
        p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
-       p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
+       p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;
 
        p_hwfn->p_iscsi_info->event_context = event_context;
        p_hwfn->p_iscsi_info->event_cb = async_event_cb;
@@ -304,8 +302,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
        int rc = 0;
        u32 dval;
        u16 wval;
-       u8 i;
        u16 *p;
+       u8 i;
 
        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
@@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 
                p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
 
-               p_tcp->flags = p_conn->tcp_flags;
+               p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
                p_tcp->ip_version = p_conn->ip_version;
                for (i = 0; i < 4; i++) {
                        dval = p_conn->remote_ip[i];
@@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
                p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
 
                p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
-               p_tcp2->flags = p_conn->tcp_flags;
+               p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
 
                p_tcp2->ip_version = p_conn->ip_version;
                for (i = 0; i < 4; i++) {
@@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
                p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
                p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
                p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
+               p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
+               p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
+               p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+               p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+               p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
        }
 
        return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
        }
 }
 
-static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
-                                     struct qed_iscsi_conn *p_conn)
+static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
 {
        if (!p_conn->queue_cnts_virt_addr)
                goto nomem;
@@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
                rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
 
        if (!rc)
-               rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
+               rc = qed_iscsi_setup_connection(p_conn);
 
        if (rc) {
                spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
@@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
        con->ss_thresh = conn_info->ss_thresh;
        con->srtt = conn_info->srtt;
        con->rtt_var = conn_info->rtt_var;
-       con->ts_time = conn_info->ts_time;
        con->ts_recent = conn_info->ts_recent;
        con->ts_recent_age = conn_info->ts_recent_age;
        con->total_rt = conn_info->total_rt;
@@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
        con->mss = conn_info->mss;
        con->snd_wnd_scale = conn_info->snd_wnd_scale;
        con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
-       con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
        con->da_timeout_value = conn_info->da_timeout_value;
        con->ack_frequency = conn_info->ack_frequency;
 
index 409041e..ca4a81d 100644 (file)
@@ -64,14 +64,21 @@ struct mpa_v2_hdr {
 
 #define QED_IWARP_INVALID_TCP_CID      0xffffffff
 #define QED_IWARP_RCV_WND_SIZE_DEF     (256 * 1024)
-#define QED_IWARP_RCV_WND_SIZE_MIN     (64 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_MIN     (0xffff)
 #define TIMESTAMP_HEADER_SIZE          (12)
+#define QED_IWARP_MAX_FIN_RT_DEFAULT   (2)
 
 #define QED_IWARP_TS_EN                        BIT(0)
 #define QED_IWARP_DA_EN                        BIT(1)
 #define QED_IWARP_PARAM_CRC_NEEDED     (1)
 #define QED_IWARP_PARAM_P2P            (1)
 
+#define QED_IWARP_DEF_MAX_RT_TIME      (0)
+#define QED_IWARP_DEF_CWND_FACTOR      (4)
+#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5)
+#define QED_IWARP_DEF_KA_TIMEOUT       (1200000)       /* 20 min */
+#define QED_IWARP_DEF_KA_INTERVAL      (1000)          /* 1 sec */
+
 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
                                 u8 fw_event_code, u16 echo,
                                 union event_ring_data *data,
@@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
-void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
-                             struct iwarp_init_func_params *p_ramrod)
+void
+qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+                        struct iwarp_init_func_ramrod_data *p_ramrod)
 {
-       p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) +
-                                   p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+       p_ramrod->iwarp.ll2_ooo_q_index =
+               RESC_START(p_hwfn, QED_LL2_QUEUE) +
+               p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+
+       p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
+
+       return;
 }
 
 static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
@@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
        tcp->ttl = 0x40;
        tcp->tos_or_tc = 0;
 
+       tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
+       tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR *  tcp->mss;
+       tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
+       tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
+       tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
+
        tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
        tcp->connect_mode = ep->connect_mode;
 
@@ -807,6 +826,7 @@ static int
 qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 {
        struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+       struct qed_iwarp_info *iwarp_info;
        struct qed_sp_init_data init_data;
        dma_addr_t async_output_phys;
        struct qed_spq_entry *p_ent;
@@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
                p_mpa_ramrod->common.reject = 1;
        }
 
+       iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+       p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
        p_mpa_ramrod->mode = ep->mpa_rev;
        SET_FIELD(p_mpa_ramrod->rtr_pref,
                  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
@@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
        /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
        iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
            ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
+       iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
        iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
        iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
 
index c1ecd74..b8f612d 100644 (file)
@@ -95,6 +95,7 @@ struct qed_iwarp_info {
        spinlock_t iw_lock;     /* for iwarp resources */
        spinlock_t qp_lock;     /* for teardown races */
        u32 rcv_wnd_scale;
+       u16 rcv_wnd_size;
        u16 max_mtu;
        u8 mac_addr[ETH_ALEN];
        u8 crc_needed;
@@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                    struct qed_rdma_start_in_params *params);
 
 void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
-                             struct iwarp_init_func_params *p_ramrod);
+                             struct iwarp_init_func_ramrod_data *p_ramrod);
 
 int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 
index 0853389..893ef08 100644 (file)
@@ -223,10 +223,9 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
        struct qed_queue_cid *p_cid;
        int rc;
 
-       p_cid = vmalloc(sizeof(*p_cid));
+       p_cid = vzalloc(sizeof(*p_cid));
        if (!p_cid)
                return NULL;
-       memset(p_cid, 0, sizeof(*p_cid));
 
        p_cid->opaque_fid = opaque_fid;
        p_cid->cid = cid;
@@ -1969,33 +1968,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
                _qed_get_vport_stats(cdev, cdev->reset_stats);
 }
 
-static void
-qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                       struct qed_arfs_config_params *p_cfg_params)
+static enum gft_profile_type
+qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
 {
-       if (p_cfg_params->arfs_enable) {
-               qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
-                                       p_cfg_params->tcp, p_cfg_params->udp,
-                                       p_cfg_params->ipv4, p_cfg_params->ipv6);
-               DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                          "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+       if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
+               return GFT_PROFILE_TYPE_4_TUPLE;
+       if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
+               return GFT_PROFILE_TYPE_IP_DST_PORT;
+       return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt,
+                            struct qed_arfs_config_params *p_cfg_params)
+{
+       if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+               qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+                              p_cfg_params->tcp,
+                              p_cfg_params->udp,
+                              p_cfg_params->ipv4,
+                              p_cfg_params->ipv6,
+                              qed_arfs_mode_to_hsi(p_cfg_params->mode));
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_SP,
+                          "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
                           p_cfg_params->tcp ? "Enable" : "Disable",
                           p_cfg_params->udp ? "Enable" : "Disable",
                           p_cfg_params->ipv4 ? "Enable" : "Disable",
-                          p_cfg_params->ipv6 ? "Enable" : "Disable");
+                          p_cfg_params->ipv6 ? "Enable" : "Disable",
+                          (u32)p_cfg_params->mode);
        } else {
-               qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+               DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
+               qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
        }
-
-       DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
-                  p_cfg_params->arfs_enable ? "Enable" : "Disable");
 }
 
-static int
-qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
                                struct qed_spq_comp_cb *p_cb,
-                               dma_addr_t p_addr, u16 length, u16 qid,
-                               u8 vport_id, bool b_is_add)
+                               struct qed_ntuple_filter_params *p_params)
 {
        struct rx_update_gft_filter_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
@@ -2004,13 +2015,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
        u8 abs_vport_id = 0;
        int rc = -EINVAL;
 
-       rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+       rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc)
                return rc;
 
-       rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
-       if (rc)
-               return rc;
+       if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+               rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
+               if (rc)
+                       return rc;
+       }
 
        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
@@ -2032,17 +2045,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                return rc;
 
        p_ramrod = &p_ent->ramrod.rx_update_gft;
-       DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
-       p_ramrod->pkt_hdr_length = cpu_to_le16(length);
-       p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
-       p_ramrod->vport_id = abs_vport_id;
-       p_ramrod->filter_type = RFS_FILTER_TYPE;
-       p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+       DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
+       p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
+
+       if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+               p_ramrod->rx_qid_valid = 1;
+               p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
+       }
+
+       p_ramrod->flow_id_valid = 0;
+       p_ramrod->flow_id = 0;
+
+       p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
+       p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
+           : GFT_DELETE_FILTER;
 
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
                   abs_vport_id, abs_rx_q_id,
-                  b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+                  p_params->b_is_add ? "Adding" : "Removing",
+                  (u64)p_params->addr, p_params->length);
 
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
@@ -2743,7 +2766,8 @@ static int qed_configure_filter(struct qed_dev *cdev,
        }
 }
 
-static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+static int qed_configure_arfs_searcher(struct qed_dev *cdev,
+                                      enum qed_filter_config_mode mode)
 {
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_arfs_config_params arfs_config_params;
@@ -2753,8 +2777,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
        arfs_config_params.udp = true;
        arfs_config_params.ipv4 = true;
        arfs_config_params.ipv6 = true;
-       arfs_config_params.arfs_enable = en_searcher;
-
+       arfs_config_params.mode = mode;
        qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                                &arfs_config_params);
        return 0;
@@ -2762,8 +2785,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
 
 static void
 qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
-                            void *cookie, union event_ring_data *data,
-                            u8 fw_return_code)
+                            void *cookie,
+                            union event_ring_data *data, u8 fw_return_code)
 {
        struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
        void *dev = p_hwfn->cdev->ops_cookie;
@@ -2771,10 +2794,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
        op->arfs_filter_op(dev, cookie, fw_return_code);
 }
 
-static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
-                                        dma_addr_t mapping, u16 length,
-                                        u16 vport_id, u16 rx_queue_id,
-                                        bool add_filter)
+static int
+qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
+                             void *cookie,
+                             struct qed_ntuple_filter_params *params)
 {
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_spq_comp_cb cb;
@@ -2783,9 +2806,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
        cb.function = qed_arfs_sp_response_handler;
        cb.cookie = cookie;
 
-       rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
-                                            &cb, mapping, length, rx_queue_id,
-                                            vport_id, add_filter);
+       if (params->b_is_vf) {
+               if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
+                                          false)) {
+                       DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
+                               params->vf_id);
+                       return rc;
+               }
+
+               params->vport_id = params->vf_id + 1;
+               params->qid = QED_RFS_NTUPLE_QID_RSS;
+       }
+
+       rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to issue a-RFS filter configuration\n");
index cc1f248..c4030e9 100644 (file)
@@ -190,7 +190,7 @@ struct qed_arfs_config_params {
        bool udp;
        bool ipv4;
        bool ipv6;
-       bool arfs_enable;
+       enum qed_filter_config_mode mode;
 };
 
 struct qed_sp_vport_update_params {
@@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
 
 void qed_reset_vport_stats(struct qed_dev *cdev);
 
+/**
+ * @brief qed_arfs_mode_configure -
+ *
+ * Enable or disable RFS mode. It must accept at least one of tcp or udp
+ * as true, and at least one of ipv4 or ipv6 as true, to enable RFS mode.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cfg_params - arfs mode configuration parameters.
+ *
+ */
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt,
+                            struct qed_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - qed_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an arfs hw filter
+ *
+ * @param p_hwfn
+ * @param p_cb - Used for QED_SPQ_MODE_CB, where the client would initialize
+ *               it with a cookie and callback function address; if not
+ *               using this mode then the client must pass NULL.
+ * @param p_params
+ */
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+                               struct qed_spq_comp_cb *p_cb,
+                               struct qed_ntuple_filter_params *p_params);
+
 #define MAX_QUEUES_PER_QZONE    (sizeof(unsigned long) * 8)
 #define QED_QUEUE_CID_SELF     (0xff)
 
index 047f556..c4f14fd 100644 (file)
@@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
        data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
        data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
        data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
+       data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
+
+       data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
 }
 
 static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
@@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
                       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
 
        p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
-       p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
+       p_ramrod->inner_vlan_stripping_en =
+               p_ll2_conn->input.rx_vlan_removal_en;
        p_ramrod->queue_id = p_ll2_conn->queue_id;
        p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
 
@@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
 
        memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
 
-       p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
-                             CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+       switch (data->input.tx_dest) {
+       case QED_LL2_TX_DEST_NW:
+               p_ll2_info->tx_dest = CORE_TX_DEST_NW;
+               break;
+       case QED_LL2_TX_DEST_LB:
+               p_ll2_info->tx_dest = CORE_TX_DEST_LB;
+               break;
+       case QED_LL2_TX_DEST_DROP:
+               p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
+               break;
+       default:
+               return -EINVAL;
+       }
+
        if (data->input.conn_type == QED_LL2_TYPE_OOO ||
            data->input.secondary_queue)
                p_ll2_info->main_func_queue = false;
@@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
                goto release_terminate;
        }
 
-       if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
-           cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+       if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
                DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
                rc = qed_ll2_start_ooo(cdev, params);
                if (rc) {
@@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
        qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
        eth_zero_addr(cdev->ll2_mac_address);
 
-       if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
-           cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+       if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
                qed_ll2_stop_ooo(cdev);
 
        rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
index 8b99c7d..6f46cb1 100644 (file)
@@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
                                        DRV_MSG_CODE_NVM_READ_NVRAM,
                                        addr + offset +
                                        (bytes_to_copy <<
-                                        DRV_MB_PARAM_NVM_LEN_SHIFT),
+                                        DRV_MB_PARAM_NVM_LEN_OFFSET),
                                        &resp, &resp_param,
                                        &read_len,
                                        (u32 *)(p_buf + offset));
index c8c4b39..bdc46f1 100644 (file)
@@ -553,7 +553,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
 
        if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
                qed_iwarp_init_fw_ramrod(p_hwfn,
-                                        &p_ent->ramrod.iwarp_init_func.iwarp);
+                                        &p_ent->ramrod.iwarp_init_func);
                p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
        } else {
                p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
index 0cdb433..f712205 100644 (file)
        0x1f0434UL
 #define PRS_REG_SEARCH_TAG1 \
        0x1f0444UL
+#define PRS_REG_SEARCH_TENANT_ID \
+       0x1f044cUL
 #define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
        0x1f0a0cUL
 #define PRS_REG_SEARCH_TCP_FIRST_FRAG \
        0x2e8800UL
 #define CCFC_REG_STRONG_ENABLE_VF \
        0x2e070cUL
-#define  CDU_REG_CID_ADDR_PARAMS       \
+#define CDU_REG_CCFC_CTX_VALID0 \
+       0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 \
+       0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 \
+       0x580408UL
+#define  CDU_REG_CID_ADDR_PARAMS \
        0x580900UL
 #define  DBG_REG_CLIENT_ENABLE \
        0x010004UL
 #define PRS_REG_ENCAPSULATION_TYPE_EN  0x1f0730UL
 #define PRS_REG_GRE_PROTOCOL           0x1f0734UL
 #define PRS_REG_VXLAN_PORT             0x1f0738UL
-#define PRS_REG_OUTPUT_FORMAT_4_0      0x1f099cUL
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2        0x1f099cUL
 #define NIG_REG_ENC_TYPE_ENABLE                0x501058UL
 
 #define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE            (0x1 << 0)
 #define PRS_REG_NGE_PORT               0x1f086cUL
 #define NIG_REG_NGE_PORT               0x508b38UL
 
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN     0x10090cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN      0x100910UL
-#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN       0x100914UL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN      0x10092cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN     0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN             0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN              0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN               0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5                0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5       0x100930UL
 
 #define NIG_REG_NGE_IP_ENABLE                  0x508b28UL
 #define NIG_REG_NGE_ETH_ENABLE                 0x508b2cUL
 #define QM_REG_WFQPFWEIGHT     0x2f4e80UL
 #define QM_REG_WFQVPWEIGHT     0x2fa000UL
 
-#define PGLCS_REG_DBG_SELECT_K2 \
+#define PGLCS_REG_DBG_SELECT_K2_E5 \
        0x001d14UL
-#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \
+#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
        0x001d18UL
-#define PGLCS_REG_DBG_SHIFT_K2 \
+#define PGLCS_REG_DBG_SHIFT_K2_E5 \
        0x001d1cUL
-#define PGLCS_REG_DBG_FORCE_VALID_K2 \
+#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \
        0x001d20UL
-#define PGLCS_REG_DBG_FORCE_FRAME_K2 \
+#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \
        0x001d24UL
 #define MISC_REG_RESET_PL_PDA_VMAIN_1 \
        0x008070UL
        0x009050UL
 #define MISCS_REG_RESET_PL_HV \
        0x009060UL
-#define MISCS_REG_RESET_PL_HV_2_K2     \
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 \
        0x009150UL
 #define DMAE_REG_DBG_SELECT \
        0x00c510UL
        0x0500b0UL
 #define GRC_REG_DBG_FORCE_FRAME        \
        0x0500b4UL
-#define UMAC_REG_DBG_SELECT_K2 \
+#define UMAC_REG_DBG_SELECT_K2_E5 \
        0x051094UL
-#define UMAC_REG_DBG_DWORD_ENABLE_K2 \
+#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \
        0x051098UL
-#define UMAC_REG_DBG_SHIFT_K2 \
+#define UMAC_REG_DBG_SHIFT_K2_E5 \
        0x05109cUL
-#define UMAC_REG_DBG_FORCE_VALID_K2 \
+#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \
        0x0510a0UL
-#define UMAC_REG_DBG_FORCE_FRAME_K2 \
+#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \
        0x0510a4UL
 #define MCP2_REG_DBG_SELECT \
        0x052400UL
        0x1f0ba0UL
 #define PRS_REG_DBG_FORCE_FRAME        \
        0x1f0ba4UL
-#define CNIG_REG_DBG_SELECT_K2 \
+#define CNIG_REG_DBG_SELECT_K2_E5 \
        0x218254UL
-#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \
        0x218258UL
-#define CNIG_REG_DBG_SHIFT_K2 \
+#define CNIG_REG_DBG_SHIFT_K2_E5 \
        0x21825cUL
-#define CNIG_REG_DBG_FORCE_VALID_K2 \
+#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \
        0x218260UL
-#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \
        0x218264UL
 #define PRM_REG_DBG_SELECT \
        0x2306a8UL
        0x580710UL
 #define CDU_REG_DBG_FORCE_FRAME        \
        0x580714UL
-#define WOL_REG_DBG_SELECT_K2 \
+#define WOL_REG_DBG_SELECT_K2_E5 \
        0x600140UL
-#define WOL_REG_DBG_DWORD_ENABLE_K2 \
+#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \
        0x600144UL
-#define WOL_REG_DBG_SHIFT_K2 \
+#define WOL_REG_DBG_SHIFT_K2_E5 \
        0x600148UL
-#define WOL_REG_DBG_FORCE_VALID_K2 \
+#define WOL_REG_DBG_FORCE_VALID_K2_E5 \
        0x60014cUL
-#define WOL_REG_DBG_FORCE_FRAME_K2 \
+#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \
        0x600150UL
-#define BMBN_REG_DBG_SELECT_K2 \
+#define BMBN_REG_DBG_SELECT_K2_E5 \
        0x610140UL
-#define BMBN_REG_DBG_DWORD_ENABLE_K2 \
+#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \
        0x610144UL
-#define BMBN_REG_DBG_SHIFT_K2 \
+#define BMBN_REG_DBG_SHIFT_K2_E5 \
        0x610148UL
-#define BMBN_REG_DBG_FORCE_VALID_K2 \
+#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \
        0x61014cUL
-#define BMBN_REG_DBG_FORCE_FRAME_K2 \
+#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \
        0x610150UL
-#define NWM_REG_DBG_SELECT_K2 \
+#define NWM_REG_DBG_SELECT_K2_E5 \
        0x8000ecUL
-#define NWM_REG_DBG_DWORD_ENABLE_K2 \
+#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \
        0x8000f0UL
-#define NWM_REG_DBG_SHIFT_K2 \
+#define NWM_REG_DBG_SHIFT_K2_E5 \
        0x8000f4UL
-#define NWM_REG_DBG_FORCE_VALID_K2 \
+#define NWM_REG_DBG_FORCE_VALID_K2_E5 \
        0x8000f8UL
-#define NWM_REG_DBG_FORCE_FRAME_K2\
+#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \
        0x8000fcUL
 #define PBF_REG_DBG_SELECT \
        0xd80060UL
        0x1901534UL
 #define USEM_REG_DBG_FORCE_FRAME \
        0x1901538UL
-#define NWS_REG_DBG_SELECT_K2 \
+#define NWS_REG_DBG_SELECT_K2_E5 \
        0x700128UL
-#define NWS_REG_DBG_DWORD_ENABLE_K2 \
+#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \
        0x70012cUL
-#define NWS_REG_DBG_SHIFT_K2 \
+#define NWS_REG_DBG_SHIFT_K2_E5 \
        0x700130UL
-#define NWS_REG_DBG_FORCE_VALID_K2 \
+#define NWS_REG_DBG_FORCE_VALID_K2_E5 \
        0x700134UL
-#define NWS_REG_DBG_FORCE_FRAME_K2 \
+#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \
        0x700138UL
-#define MS_REG_DBG_SELECT_K2 \
+#define MS_REG_DBG_SELECT_K2_E5 \
        0x6a0228UL
-#define MS_REG_DBG_DWORD_ENABLE_K2 \
+#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \
        0x6a022cUL
-#define MS_REG_DBG_SHIFT_K2 \
+#define MS_REG_DBG_SHIFT_K2_E5 \
        0x6a0230UL
-#define MS_REG_DBG_FORCE_VALID_K2 \
+#define MS_REG_DBG_FORCE_VALID_K2_E5 \
        0x6a0234UL
-#define MS_REG_DBG_FORCE_FRAME_K2 \
+#define MS_REG_DBG_FORCE_FRAME_K2_E5 \
        0x6a0238UL
-#define PCIE_REG_DBG_COMMON_SELECT_K2 \
+#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \
        0x054398UL
-#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \
        0x05439cUL
-#define PCIE_REG_DBG_COMMON_SHIFT_K2 \
+#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \
        0x0543a0UL
-#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \
        0x0543a4UL
-#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
        0x0543a8UL
+#define PTLD_REG_DBG_SELECT_E5 \
+       0x5a1600UL
+#define PTLD_REG_DBG_DWORD_ENABLE_E5 \
+       0x5a1604UL
+#define PTLD_REG_DBG_SHIFT_E5 \
+       0x5a1608UL
+#define PTLD_REG_DBG_FORCE_VALID_E5 \
+       0x5a160cUL
+#define PTLD_REG_DBG_FORCE_FRAME_E5 \
+       0x5a1610UL
+#define YPLD_REG_DBG_SELECT_E5 \
+       0x5c1600UL
+#define YPLD_REG_DBG_DWORD_ENABLE_E5 \
+       0x5c1604UL
+#define YPLD_REG_DBG_SHIFT_E5 \
+       0x5c1608UL
+#define YPLD_REG_DBG_FORCE_VALID_E5 \
+       0x5c160cUL
+#define YPLD_REG_DBG_FORCE_FRAME_E5 \
+       0x5c1610UL
+#define RGSRC_REG_DBG_SELECT_E5        \
+       0x320040UL
+#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \
+       0x320044UL
+#define RGSRC_REG_DBG_SHIFT_E5 \
+       0x320048UL
+#define RGSRC_REG_DBG_FORCE_VALID_E5 \
+       0x32004cUL
+#define RGSRC_REG_DBG_FORCE_FRAME_E5 \
+       0x320050UL
+#define TGSRC_REG_DBG_SELECT_E5        \
+       0x322040UL
+#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \
+       0x322044UL
+#define TGSRC_REG_DBG_SHIFT_E5 \
+       0x322048UL
+#define TGSRC_REG_DBG_FORCE_VALID_E5 \
+       0x32204cUL
+#define TGSRC_REG_DBG_FORCE_FRAME_E5 \
+       0x322050UL
 #define MISC_REG_RESET_PL_UA \
        0x008050UL
 #define MISC_REG_RESET_PL_HV \
        0x1940000UL
 #define SEM_FAST_REG_INT_RAM \
        0x020000UL
-#define SEM_FAST_REG_INT_RAM_SIZE \
+#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
        20480
 #define GRC_REG_TRACE_FIFO_VALID_DATA \
        0x050064UL
        0x340800UL
 #define BRB_REG_BIG_RAM_DATA \
        0x341500UL
+#define BRB_REG_BIG_RAM_DATA_SIZE \
+       64
 #define SEM_FAST_REG_STALL_0_BB_K2 \
        0x000488UL
 #define SEM_FAST_REG_STALLED \
        0x238c30UL
 #define MISCS_REG_BLOCK_256B_EN \
        0x009074UL
-#define MCP_REG_SCRATCH_SIZE \
+#define MCP_REG_SCRATCH_SIZE_BB_K2 \
        57344
 #define MCP_REG_CPU_REG_FILE \
        0xe05200UL
        0x008c14UL
 #define NWS_REG_NWS_CMU_K2     \
        0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \
        0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \
        0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \
        0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \
        0x0006c4UL
-#define MS_REG_MS_CMU_K2 \
+#define MS_REG_MS_CMU_K2_E5 \
        0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
        0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
        0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
        0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
        0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
        0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
        0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
        0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
        0x000214UL
-#define PHY_PCIE_REG_PHY0_K2 \
+#define PHY_PCIE_REG_PHY0_K2_E5 \
        0x620000UL
-#define PHY_PCIE_REG_PHY1_K2 \
+#define PHY_PCIE_REG_PHY1_K2_E5 \
        0x624000UL
 #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
 #define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
index a1d33f3..5e927b6 100644 (file)
@@ -351,7 +351,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
                DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
                p_ramrod->mf_mode = MF_NPAR;
        }
-       p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+       p_ramrod->outer_tag_config.outer_tag.tci =
+               cpu_to_le16(p_hwfn->hw_info.ovlan);
 
        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -396,8 +398,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 
        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-                  "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
-                  sb, sb_index, p_ramrod->outer_tag);
+                  "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
+                  sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);
 
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
index be48d9a..217b62a 100644 (file)
@@ -215,7 +215,7 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
 {
-       struct core_conn_context *p_cxt;
+       struct e4_core_conn_context *p_cxt;
        struct qed_cxt_info cxt_info;
        u16 physical_q;
        int rc;
@@ -233,11 +233,11 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
        p_cxt = cxt_info.p_cxt;
 
        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
-                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+                 E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
-                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+                 E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
-                 XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+                 E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 
        /* QM physical queue */
        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
index 3f40b1d..5acb91b 100644 (file)
@@ -153,9 +153,9 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
-static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
-                                 int rel_vf_id,
-                                 bool b_enabled_only, bool b_non_malicious)
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+                          int rel_vf_id,
+                          bool b_enabled_only, bool b_non_malicious)
 {
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -1621,7 +1621,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
        /* fill in pfdev info */
        pfdev_info->chip_num = p_hwfn->cdev->chip_num;
        pfdev_info->db_size = 0;
-       pfdev_info->indices_per_sb = PIS_PER_SB;
+       pfdev_info->indices_per_sb = PIS_PER_SB_E4;
 
        pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
                                   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -3582,11 +3582,11 @@ static int
 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
                        struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
 {
-       u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+       u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
        int i, cnt;
 
        /* Read initial consumers & producers */
-       for (i = 0; i < MAX_NUM_VOQS; i++) {
+       for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
                u32 prod;
 
                cons[i] = qed_rd(p_hwfn, p_ptt,
@@ -3601,7 +3601,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
        /* Wait for consumers to pass the producers */
        i = 0;
        for (cnt = 0; cnt < 50; cnt++) {
-               for (; i < MAX_NUM_VOQS; i++) {
+               for (; i < MAX_NUM_VOQS_E4; i++) {
                        u32 tmp;
 
                        tmp = qed_rd(p_hwfn, p_ptt,
@@ -3611,7 +3611,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
                                break;
                }
 
-               if (i == MAX_NUM_VOQS)
+               if (i == MAX_NUM_VOQS_E4)
                        break;
 
                msleep(20);
@@ -4237,6 +4237,7 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt, int vfid, int val)
 {
+       struct qed_mcp_link_state *p_link;
        struct qed_vf_info *vf;
        u8 abs_vp_id = 0;
        int rc;
@@ -4249,7 +4250,10 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
        if (rc)
                return rc;
 
-       return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+       p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
+       return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+                                p_link->speed);
 }
 
 static int
index 3955929..9a8fd79 100644 (file)
@@ -274,6 +274,23 @@ enum qed_iov_wq_flag {
 
 #ifdef CONFIG_QED_SRIOV
 /**
+ * @brief Check if given VF ID @vfid is valid
+ *        w.r.t. @b_enabled_only value
+ *        if b_enabled_only = true - only enabled VF id is valid
+ *        else any VF id less than max_vfs is valid
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - true iff we want to validate vf isn't malicious.
+ *
+ * @return bool - true for valid VF ID
+ */
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+                          int rel_vf_id,
+                          bool b_enabled_only, bool b_non_malicious);
+
+/**
  * @brief - Given a VF index, return index of next [including that] active VF.
  *
  * @param p_hwfn
@@ -376,6 +393,13 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev);
 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
 void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
 #else
+static inline bool
+qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+                     int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
+{
+       return false;
+}
+
 static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
                                             u16 rel_vf_id)
 {
index 8a33651..9935978 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/bpf.h>
+#include <net/xdp.h>
 #include <linux/qed/qede_rdma.h>
 #include <linux/io.h>
 #ifdef CONFIG_RFS_ACCEL
@@ -52,9 +53,9 @@
 #include <linux/qed/qed_eth_if.h>
 
 #define QEDE_MAJOR_VERSION             8
-#define QEDE_MINOR_VERSION             10
-#define QEDE_REVISION_VERSION          10
-#define QEDE_ENGINEERING_VERSION       21
+#define QEDE_MINOR_VERSION             33
+#define QEDE_REVISION_VERSION          0
+#define QEDE_ENGINEERING_VERSION       20
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
                __stringify(QEDE_MINOR_VERSION) "."             \
                __stringify(QEDE_REVISION_VERSION) "."          \
@@ -345,6 +346,7 @@ struct qede_rx_queue {
        u64 xdp_no_pass;
 
        void *handle;
+       struct xdp_rxq_info xdp_rxq;
 };
 
 union db_prod {
index 77aa826..6687e04 100644 (file)
@@ -98,10 +98,18 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
                                     u16 rxq_id, bool add_fltr)
 {
        const struct qed_eth_ops *op = edev->ops;
+       struct qed_ntuple_filter_params params;
 
        if (n->used)
                return;
 
+       memset(&params, 0, sizeof(params));
+
+       params.addr = n->mapping;
+       params.length = n->buf_len;
+       params.qid = rxq_id;
+       params.b_is_add = add_fltr;
+
        DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
                   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
                   add_fltr ? "Adding" : "Deleting",
@@ -110,8 +118,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
 
        n->used = true;
        n->filter_op = add_fltr;
-       op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
-                                rxq_id, add_fltr);
+       op->ntuple_filter_config(edev->cdev, n, &params);
 }
 
 static void
@@ -141,7 +148,10 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
        edev->arfs->filter_count++;
 
        if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
-               edev->ops->configure_arfs_searcher(edev->cdev, true);
+               enum qed_filter_config_mode mode;
+
+               mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+               edev->ops->configure_arfs_searcher(edev->cdev, mode);
                edev->arfs->enable = true;
        }
 
@@ -160,8 +170,11 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
        edev->arfs->filter_count--;
 
        if (!edev->arfs->filter_count && edev->arfs->enable) {
+               enum qed_filter_config_mode mode;
+
+               mode = QED_FILTER_CONFIG_MODE_DISABLE;
                edev->arfs->enable = false;
-               edev->ops->configure_arfs_searcher(edev->cdev, false);
+               edev->ops->configure_arfs_searcher(edev->cdev, mode);
        }
 }
 
@@ -255,8 +268,11 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
 
        if (!edev->arfs->filter_count) {
                if (edev->arfs->enable) {
+                       enum qed_filter_config_mode mode;
+
+                       mode = QED_FILTER_CONFIG_MODE_DISABLE;
                        edev->arfs->enable = false;
-                       edev->ops->configure_arfs_searcher(edev->cdev, false);
+                       edev->ops->configure_arfs_searcher(edev->cdev, mode);
                }
 #ifdef CONFIG_RFS_ACCEL
        } else {
index 48ec4c5..dafc079 100644 (file)
@@ -1006,6 +1006,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
        xdp.data = xdp.data_hard_start + *data_offset;
        xdp_set_data_meta_invalid(&xdp);
        xdp.data_end = xdp.data + *len;
+       xdp.rxq = &rxq->xdp_rxq;
 
        /* Queues always have a full reset currently, so for the time
         * being until there's atomic program replace just mark read
index 90d79ae..2db70ea 100644 (file)
@@ -765,6 +765,12 @@ static void qede_free_fp_array(struct qede_dev *edev)
                        fp = &edev->fp_array[i];
 
                        kfree(fp->sb_info);
+                       /* Handle mem alloc failure case where qede_init_fp
+                        * didn't register xdp_rxq_info yet.
+                        * Implicit only (fp->type & QEDE_FASTPATH_RX)
+                        */
+                       if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
+                               xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
                        kfree(fp->rxq);
                        kfree(fp->xdp_tx);
                        kfree(fp->txq);
@@ -1147,7 +1153,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
 static int qede_alloc_mem_sb(struct qede_dev *edev,
                             struct qed_sb_info *sb_info, u16 sb_id)
 {
-       struct status_block *sb_virt;
+       struct status_block_e4 *sb_virt;
        dma_addr_t sb_phys;
        int rc;
 
@@ -1493,6 +1499,10 @@ static void qede_init_fp(struct qede_dev *edev)
                        else
                                fp->rxq->data_direction = DMA_FROM_DEVICE;
                        fp->rxq->dev = &edev->pdev->dev;
+
+                       /* Driver has no error path from here */
+                       WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
+                                                fp->rxq->rxq_id) < 0);
                }
 
                if (fp->type & QEDE_FASTPATH_TX) {
index 70c92b6..38c924b 100644 (file)
@@ -253,18 +253,18 @@ static int emac_open(struct net_device *netdev)
                return ret;
        }
 
-       ret = emac_mac_up(adpt);
+       ret = adpt->phy.open(adpt);
        if (ret) {
                emac_mac_rx_tx_rings_free_all(adpt);
                free_irq(irq->irq, irq);
                return ret;
        }
 
-       ret = adpt->phy.open(adpt);
+       ret = emac_mac_up(adpt);
        if (ret) {
-               emac_mac_down(adpt);
                emac_mac_rx_tx_rings_free_all(adpt);
                free_irq(irq->irq, irq);
+               adpt->phy.close(adpt);
                return ret;
        }
 
index 009780d..c87f57c 100644 (file)
@@ -2205,8 +2205,7 @@ out_dma_free:
        if (chip_id != RCAR_GEN2)
                ravb_ptp_stop(ndev);
 out_release:
-       if (ndev)
-               free_netdev(ndev);
+       free_netdev(ndev);
 
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
index 7532300..d47bbbb 100644 (file)
@@ -3282,8 +3282,7 @@ out_napi_del:
 
 out_release:
        /* net_dev free */
-       if (ndev)
-               free_netdev(ndev);
+       free_netdev(ndev);
 
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
index 1f64c7f..8ae467d 100644 (file)
@@ -233,7 +233,7 @@ static int efx_ef10_get_vf_index(struct efx_nic *efx)
 
 static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
 {
-       MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V3_OUT_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        size_t outlen;
        int rc;
@@ -306,6 +306,19 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
                          efx->vi_stride);
        }
 
+       if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+               efx->num_mac_stats = MCDI_WORD(outbuf,
+                               GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+               netif_dbg(efx, probe, efx->net_dev,
+                         "firmware reports num_mac_stats = %u\n",
+                         efx->num_mac_stats);
+       } else {
+               /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
+               netif_dbg(efx, probe, efx->net_dev,
+                         "firmware did not report num_mac_stats, assuming %u\n",
+                         efx->num_mac_stats);
+       }
+
        return 0;
 }
 
@@ -1630,6 +1643,29 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
        EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
        EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
+       EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
+       EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
+       EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
+       EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
+       EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
+       EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
+       EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START),
+       EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
+       EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
+       EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
+       EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
+       EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
+       EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
+       EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
+       EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
+       EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
+       EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
+       EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
+       EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
+       EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
+       EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
+       EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
+       EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
 };
 
 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |     \
@@ -1705,6 +1741,43 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |                      \
        (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
 
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_FEC_STAT_MASK (                                           \
+       (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) |             \
+       (1ULL << (EF10_STAT_fec_corrected_errors - 64)) |               \
+       (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) |        \
+       (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) |        \
+       (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) |        \
+       (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
+
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_CTPIO_STAT_MASK (                                         \
+       (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) |                 \
+       (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) |             \
+       (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) |           \
+       (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) |           \
+       (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) |                \
+       (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) |               \
+       (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) |                 \
+       (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) |            \
+       (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) |             \
+       (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) |              \
+       (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) |          \
+       (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) |         \
+       (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) |                \
+       (1ULL << (EF10_STAT_ctpio_success - 64)) |                      \
+       (1ULL << (EF10_STAT_ctpio_fallback - 64)) |                     \
+       (1ULL << (EF10_STAT_ctpio_poison - 64)) |                       \
+       (1ULL << (EF10_STAT_ctpio_erase - 64)))
+
 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
 {
        u64 raw_mask = HUNT_COMMON_STAT_MASK;
@@ -1743,10 +1816,22 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
        if (nic_data->datapath_caps &
            (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
                raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
-               raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+               raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
        } else {
                raw_mask[1] = 0;
        }
+       /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
+       if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
+               raw_mask[1] |= EF10_FEC_STAT_MASK;
+
+       /* CTPIO stats appear in V3. Only show them on devices that actually
+        * support CTPIO. Although this driver doesn't use CTPIO others might,
+        * and we may be reporting the stats for the underlying port.
+        */
+       if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
+           (nic_data->datapath_caps2 &
+            (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
+               raw_mask[1] |= EF10_CTPIO_STAT_MASK;
 
 #if BITS_PER_LONG == 64
        BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
@@ -1850,7 +1935,7 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
 
        dma_stats = efx->stats_buffer.addr;
 
-       generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+       generation_end = dma_stats[efx->num_mac_stats - 1];
        if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
                return 0;
        rmb();
@@ -1898,7 +1983,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
        __le64 generation_start, generation_end;
        u64 *stats = nic_data->stats;
-       u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+       u32 dma_len = efx->num_mac_stats * sizeof(u64);
        struct efx_buffer stats_buf;
        __le64 *dma_stats;
        int rc;
@@ -1923,7 +2008,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
        }
 
        dma_stats = stats_buf.addr;
-       dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+       dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
 
        MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
        MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
@@ -1942,7 +2027,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
                goto out;
        }
 
-       generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+       generation_end = dma_stats[efx->num_mac_stats - 1];
        if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
                WARN_ON_ONCE(1);
                goto out;
index 7bcbedc..3780161 100644 (file)
@@ -2983,6 +2983,8 @@ static int efx_init_struct(struct efx_nic *efx,
                efx->type->rx_ts_offset - efx->type->rx_prefix_size;
        spin_lock_init(&efx->stats_lock);
        efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+       efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+       BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
        mutex_init(&efx->mac_lock);
        efx->phy_op = &efx_dummy_phy_operations;
        efx->mdio.dev = net_dev;
index 91fb54f..869d76f 100644 (file)
 #define MCDI_HEADER_XFLAGS_WIDTH 8
 /* Request response using event */
 #define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
 
 /* Maximum number of payload bytes */
 #define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
 
 
 /* The MC can generate events for two reasons:
- *   - To complete a shared memory request if XFLAGS_EVREQ was set
+ *   - To advance a shared memory request if XFLAGS_EVREQ was set
  *   - As a notification (link state, i2c event), controlled
  *     via MC_CMD_LOG_CTRL
  *
 /* Returned by MC_CMD_TESTASSERT if the action that should
  * have caused an assertion failed to do so.  */
 #define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away.  This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away.  This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
 
 #define MC_CMD_ERR_CODE_OFST 0
 
 /* enum: Fatal. */
 #define          MCDI_EVENT_LEVEL_FATAL 0x3
 #define       MCDI_EVENT_DATA_OFST 0
+#define       MCDI_EVENT_DATA_LEN 4
 #define        MCDI_EVENT_CMDDONE_SEQ_LBN 0
 #define        MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
 #define        MCDI_EVENT_CMDDONE_DATALEN_LBN 8
 #define        MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
 #define        MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
 #define        MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN  0x0
 /* enum: 100Mbs */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_100M  0x1
 /* enum: 1Gbs */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_10G  0x3
 /* enum: 40Gbs */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_40G  0x4
+/* enum: 25Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_25G  0x5
+/* enum: 50Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_50G  0x6
+/* enum: 100Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_100G  0x7
 #define        MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
 #define        MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
 #define        MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
 #define          MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
 /* enum: PTP status update */
 #define          MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define          MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define          MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define          MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that invalid flash type detected */
+#define          MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define          MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define          MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define          MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define          MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define          MCDI_EVENT_AOE_FC_RUNNING 0x14
 #define        MCDI_EVENT_AOE_ERR_DATA_LBN 8
 #define        MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define          MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by driver
+ */
+#define          MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number in FPGA header */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
 #define        MCDI_EVENT_RX_ERR_RXQ_LBN 0
 #define        MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
 #define        MCDI_EVENT_RX_ERR_TYPE_LBN 12
 #define          MCDI_EVENT_MUM_WATCHDOG 0x3
 #define        MCDI_EVENT_MUM_ERR_DATA_LBN 8
 #define        MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_DBRET_SEQ_LBN 0
+#define        MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define        MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define          MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define          MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define          MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define          MCDI_EVENT_SUC_WATCHDOG 0x4
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define        MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define        MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
 #define       MCDI_EVENT_DATA_LBN 0
 #define       MCDI_EVENT_DATA_WIDTH 32
 #define       MCDI_EVENT_SRC_LBN 36
  * been processed and it may now resend the command
  */
 #define          MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define          MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define          MCDI_EVENT_CODE_SUC 0x1f
 /* enum: Artificial event generated by host and posted via MC for test
  * purposes.
  */
 #define          MCDI_EVENT_CODE_TESTGEN  0xfa
 #define       MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define       MCDI_EVENT_CMDDONE_DATA_LEN 4
 #define       MCDI_EVENT_CMDDONE_DATA_LBN 0
 #define       MCDI_EVENT_CMDDONE_DATA_WIDTH 32
 #define       MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define       MCDI_EVENT_LINKCHANGE_DATA_LEN 4
 #define       MCDI_EVENT_LINKCHANGE_DATA_LBN 0
 #define       MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
 #define       MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define       MCDI_EVENT_SENSOREVT_DATA_LEN 4
 #define       MCDI_EVENT_SENSOREVT_DATA_LBN 0
 #define       MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
 #define       MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define       MCDI_EVENT_TX_ERR_DATA_LEN 4
 #define       MCDI_EVENT_TX_ERR_DATA_LBN 0
 #define       MCDI_EVENT_TX_ERR_DATA_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_SECONDS_OFST 0
+#define       MCDI_EVENT_PTP_SECONDS_LEN 4
 #define       MCDI_EVENT_PTP_SECONDS_LBN 0
 #define       MCDI_EVENT_PTP_SECONDS_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_MAJOR_LEN 4
 #define       MCDI_EVENT_PTP_MAJOR_LBN 0
 #define       MCDI_EVENT_PTP_MAJOR_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
  * of timestamp
  */
 #define       MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define       MCDI_EVENT_PTP_NANOSECONDS_LEN 4
 #define       MCDI_EVENT_PTP_NANOSECONDS_LBN 0
 #define       MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_MINOR_OFST 0
+#define       MCDI_EVENT_PTP_MINOR_LEN 4
 #define       MCDI_EVENT_PTP_MINOR_LBN 0
 #define       MCDI_EVENT_PTP_MINOR_WIDTH 32
 /* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
  */
 #define       MCDI_EVENT_PTP_UUID_OFST 0
+#define       MCDI_EVENT_PTP_UUID_LEN 4
 #define       MCDI_EVENT_PTP_UUID_LBN 0
 #define       MCDI_EVENT_PTP_UUID_WIDTH 32
 #define       MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define       MCDI_EVENT_RX_ERR_DATA_LEN 4
 #define       MCDI_EVENT_RX_ERR_DATA_LBN 0
 #define       MCDI_EVENT_RX_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define       MCDI_EVENT_PAR_ERR_DATA_LEN 4
 #define       MCDI_EVENT_PAR_ERR_DATA_LBN 0
 #define       MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define       MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define       MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
 /* For CODE_PTP_TIME events, the major value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
 #define       MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
 #define       MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
 /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
 /* For CODE_PTP_TIME events where report sync status is enabled, indicates
  * whether the NIC clock has ever been set
  */
  */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
 /* Zero means that the request has been completed or authorized, and the driver
  */
 #define       MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
 #define       MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define       MCDI_EVENT_DBRET_DATA_OFST 0
+#define       MCDI_EVENT_DBRET_DATA_LEN 4
+#define       MCDI_EVENT_DBRET_DATA_LBN 0
+#define       MCDI_EVENT_DBRET_DATA_WIDTH 32
 
 /* FCDI_EVENT structuredef */
 #define    FCDI_EVENT_LEN 8
 /* enum: Fatal. */
 #define          FCDI_EVENT_LEVEL_FATAL 0x3
 #define       FCDI_EVENT_DATA_OFST 0
+#define       FCDI_EVENT_DATA_LEN 4
 #define        FCDI_EVENT_LINK_STATE_STATUS_LBN 0
 #define        FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
 #define          FCDI_EVENT_LINK_DOWN 0x0 /* enum */
 #define          FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
 #define          FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
 #define       FCDI_EVENT_ASSERT_TYPE_LBN 36
 #define       FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
 #define       FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
 #define       FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define       FCDI_EVENT_LINK_STATE_DATA_LEN 4
 #define       FCDI_EVENT_LINK_STATE_DATA_LBN 0
 #define       FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
 #define       FCDI_EVENT_PTP_STATE_OFST 0
+#define       FCDI_EVENT_PTP_STATE_LEN 4
 #define          FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
 #define          FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
 #define          FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
 /* Index of MC port being referred to */
 #define       FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
 /* FC Port index that matches the MC port index in SRC */
 #define       FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
 #define       FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
 #define       FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
 #define       FCDI_EVENT_BOOT_RESULT_OFST 0
+#define       FCDI_EVENT_BOOT_RESULT_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
 #define       FCDI_EVENT_BOOT_RESULT_LBN 0
 #define    FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
 /* Number of timestamps following */
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define       FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
 /* Seconds field of a timestamp record */
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define       FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
 /* Nanoseconds field of a timestamp record */
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
 /* Timestamp records comprising the event */
 /* enum: Fatal. */
 #define          MUM_EVENT_LEVEL_FATAL 0x3
 #define       MUM_EVENT_DATA_OFST 0
+#define       MUM_EVENT_DATA_LEN 4
 #define        MUM_EVENT_SENSOR_ID_LBN 0
 #define        MUM_EVENT_SENSOR_ID_WIDTH 8
 /*             Enum values, see field(s): */
 /* enum: Link fault has been asserted, or has cleared. */
 #define          MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
 #define       MUM_EVENT_SENSOR_DATA_OFST 0
+#define       MUM_EVENT_SENSOR_DATA_LEN 4
 #define       MUM_EVENT_SENSOR_DATA_LBN 0
 #define       MUM_EVENT_SENSOR_DATA_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_LEN 4
 #define       MUM_EVENT_PORT_PHY_FLAGS_LBN 0
 #define       MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define       MUM_EVENT_PORT_PHY_CAPS_LEN 4
 #define       MUM_EVENT_PORT_PHY_CAPS_LBN 0
 #define       MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define       MUM_EVENT_PORT_PHY_TECH_LEN 4
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
 /* MC_CMD_READ32_IN msgrequest */
 #define    MC_CMD_READ32_IN_LEN 8
 #define       MC_CMD_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_READ32_IN_ADDR_LEN 4
 #define       MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define       MC_CMD_READ32_IN_NUMWORDS_LEN 4
 
 /* MC_CMD_READ32_OUT msgresponse */
 #define    MC_CMD_READ32_OUT_LENMIN 4
  */
 #define MC_CMD_WRITE32 0x2
 
-#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_WRITE32_IN msgrequest */
 #define    MC_CMD_WRITE32_IN_LENMIN 8
 #define    MC_CMD_WRITE32_IN_LENMAX 252
 #define    MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
 #define       MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_WRITE32_IN_ADDR_LEN 4
 #define       MC_CMD_WRITE32_IN_BUFFER_OFST 4
 #define       MC_CMD_WRITE32_IN_BUFFER_LEN 4
 #define       MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
  * is a bitfield, with each bit as documented below.
  */
 #define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define       MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
 #define          MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
 /* Destination address */
 #define       MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define       MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
 #define       MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define       MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
 /* Address of where to jump after copy. */
 #define       MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define       MC_CMD_COPYCODE_IN_JUMP_LEN 4
 /* enum: Control should return to the caller rather than jumping */
 #define          MC_CMD_COPYCODE_JUMP_NONE 0x1
 
  */
 #define MC_CMD_SET_FUNC 0x4
 
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_FUNC_IN msgrequest */
 #define    MC_CMD_SET_FUNC_IN_LEN 4
 /* Set function */
 #define       MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define       MC_CMD_SET_FUNC_IN_FUNC_LEN 4
 
 /* MC_CMD_SET_FUNC_OUT msgresponse */
 #define    MC_CMD_SET_FUNC_OUT_LEN 0
  */
 #define MC_CMD_GET_BOOT_STATUS 0x5
 
-#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
 #define    MC_CMD_GET_BOOT_STATUS_IN_LEN 0
 #define    MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
 /* ?? */
 #define       MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define       MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
 /* enum: indicates that the MC wasn't flash booted */
 #define          MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL  0xdeadbeef
 #define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
 #define    MC_CMD_GET_ASSERTS_IN_LEN 4
 /* Set to clear assertion */
 #define       MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define       MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
 
 /* MC_CMD_GET_ASSERTS_OUT msgresponse */
 #define    MC_CMD_GET_ASSERTS_OUT_LEN 140
 /* Assertion status flag. */
 #define       MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define       MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
 /* enum: No assertions have failed. */
 #define          MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
 /* enum: A system-level assertion has failed. */
 #define          MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
 /* Failing PC value */
 #define       MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define       MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
 /* Saved GP regs */
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
 #define          MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
 /* Failing thread address */
 #define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
 #define       MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_LOG_CTRL_IN_LEN 8
 /* Log destination */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
 /* enum: UART. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
 /* enum: Event queue. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
 /* Legacy argument. Must be zero. */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
 
 /* MC_CMD_LOG_CTRL_OUT msgresponse */
 #define    MC_CMD_LOG_CTRL_OUT_LEN 0
 #define    MC_CMD_GET_VERSION_EXT_IN_LEN 4
 /* placeholder, set to 0 */
 #define       MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define       MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
 
 /* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
 #define    MC_CMD_GET_VERSION_V0_OUT_LEN 4
 #define       MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define       MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
 /* enum: Reserved version number to indicate "any" version. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
 /* enum: Bootrom version value for Siena. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
 /* enum: Bootrom version value for Huntington. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define          MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
 
 /* MC_CMD_GET_VERSION_OUT msgresponse */
 #define    MC_CMD_GET_VERSION_OUT_LEN 32
 /*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
 #define       MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
 /* 128bit mask of functions supported by the current firmware */
 #define       MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
 #define       MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
 /* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
 #define    MC_CMD_GET_VERSION_EXT_OUT_LEN 48
 /*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
 #define       MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
 /* 128bit mask of functions supported by the current firmware */
 #define       MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
 #define       MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
 #define          MC_CMD_PTP_OP_ENABLE 0x1
 /* enum: Disable PTP packet timestamping operation. */
 #define          MC_CMD_PTP_OP_DISABLE 0x2
-/* enum: Send a PTP packet. */
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
 #define          MC_CMD_PTP_OP_TRANSMIT 0x3
 /* enum: Read the current NIC time. */
 #define          MC_CMD_PTP_OP_READ_NIC_TIME 0x4
-/* enum: Get the current PTP status. */
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666).
+ */
 #define          MC_CMD_PTP_OP_STATUS 0x5
 /* enum: Adjust the PTP NIC's time. */
 #define          MC_CMD_PTP_OP_ADJUST 0x6
 /* enum: Synchronize host and NIC time. */
 #define          MC_CMD_PTP_OP_SYNCHRONIZE 0x7
-/* enum: Basic manufacturing tests. */
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
-/* enum: Packet based manufacturing tests. */
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
 /* enum: Reset some of the PTP related statistics */
 #define          MC_CMD_PTP_OP_RESET_STATS 0xa
 /* enum: Debug operations to MC. */
 #define          MC_CMD_PTP_OP_DEBUG 0xb
-/* enum: Read an FPGA register */
+/* enum: Read an FPGA register. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_FPGAREAD 0xc
-/* enum: Write an FPGA register */
+/* enum: Write an FPGA register. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_FPGAWRITE 0xd
 /* enum: Apply an offset to the NIC clock */
 #define          MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
-/* enum: Change Apply an offset to the NIC clock */
+/* enum: Change the frequency correction applied to the NIC clock */
 #define          MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
-/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
-/* enum: Set the MC packet filter UUID for received PTP packets */
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
-/* enum: Set the MC packet filter Domain for received PTP packets */
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
-/* enum: Set the clock source */
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
 #define          MC_CMD_PTP_OP_SET_CLK_SRC 0x13
-/* enum: Reset value of Timer Reg. */
+/* enum: Reset value of Timer Reg. Not implemented. */
 #define          MC_CMD_PTP_OP_RST_CLK 0x14
 /* enum: Enable the forwarding of PPS events to the host */
 #define          MC_CMD_PTP_OP_PPS_ENABLE 0x15
 /* enum: Unsubscribe to stop receiving time events */
 #define          MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
 /* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
- * input on the same NIC.
+ * input on the same NIC. Siena PTP adapters only.
  */
 #define          MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
 /* enum: Set the PTP sync status. Status is used by firmware to report to event
 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_ENABLE_LEN 16
 #define       MC_CMD_PTP_IN_CMD_OFST 0
+#define       MC_CMD_PTP_IN_CMD_LEN 4
 #define       MC_CMD_PTP_IN_PERIPH_ID_OFST 4
-/* Event queue for PTP events */
+#define       MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
 #define       MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
-/* PTP timestamping mode */
+#define       MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
 #define       MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define       MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
 /* enum: PTP, version 1 */
 #define          MC_CMD_PTP_MODE_V1 0x0
 /* enum: PTP, version 1, with VLAN headers - deprecated */
 /* MC_CMD_PTP_IN_DISABLE msgrequest */
 #define    MC_CMD_PTP_IN_DISABLE_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_TRANSMIT msgrequest */
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
 #define    MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Transmit packet length */
 #define       MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define       MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
 /* Transmit packet data */
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
 /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
 #define    MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define    MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_STATUS msgrequest */
 #define    MC_CMD_PTP_IN_STATUS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_ADJUST_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Frequency adjustment 40 bit fixed point ns */
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
 /* enum: Number of fractional bits in frequency adjustment */
 #define          MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define          MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
 /* Time adjustment in seconds */
 #define       MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
 /* Time adjustment major value */
 #define       MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
 /* Time adjustment in nanoseconds */
 #define       MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
 /* Time adjustment minor value */
 #define       MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define    MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/*               MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/*               MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
 
 /* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
 #define    MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Number of time readings to capture */
 #define       MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define       MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
 /* Host address in which to write "synchronization started" indication (64
  * bits)
  */
 /* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Enable or disable packet testing */
 #define       MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
 
 /* MC_CMD_PTP_IN_RESET_STATS msgrequest */
 #define    MC_CMD_PTP_IN_RESET_STATS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Reset PTP statistics */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_DEBUG msgrequest */
 #define    MC_CMD_PTP_IN_DEBUG_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Debug operations */
 #define       MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define       MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
 
 /* MC_CMD_PTP_IN_FPGAREAD msgrequest */
 #define    MC_CMD_PTP_IN_FPGAREAD_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 #define       MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define       MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
 #define       MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define       MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
 
 /* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
 #define    MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
 #define    MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
 #define    MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 #define       MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define       MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
 /* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Time adjustment in seconds */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
 /* Time adjustment major value */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
 /* Time adjustment in nanoseconds */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
 /* Time adjustment minor value */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define    MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
 
 /* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Frequency adjustment 40 bit fixed point ns */
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
-/* enum: Number of fractional bits in frequency adjustment */
-/*               MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/*            Enum values, see field(s): */
+/*               MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
 
 /* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Number of VLAN tags, 0 if not VLAN */
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
 /* Set of VLAN tags to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
 /* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable UUID filtering, 0 to disable */
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
 /* UUID to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
 /* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable Domain filtering, 0 to disable */
 #define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
 /* Domain number to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
 
 /* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
 #define    MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Set the clock source. */
 #define       MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define       MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
 /* enum: Internal. */
 #define          MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
 /* enum: External. */
 /* MC_CMD_PTP_IN_RST_CLK msgrequest */
 #define    MC_CMD_PTP_IN_RST_CLK_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Reset value of Timer Reg. */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Enable or disable */
 #define       MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define       MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
 /* enum: Enable */
 #define          MC_CMD_PTP_ENABLE_PPS 0x0
 /* enum: Disable */
 #define          MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queue id to send events back */
+/* Not used. Events are always sent to function relative queue 0. */
 #define       MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define       MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
 
 /* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
 #define    MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
 #define    MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
 #define    MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Original field containing queue ID. Now extended to include flags. */
 #define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Unsubscribe options */
 #define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
 /* enum: Unsubscribe a single queue */
 #define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
 /* enum: Unsubscribe all queues */
 #define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
 /* Event queue ID */
 #define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
 
 /* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable PPS test mode, 0 to disable and return result. */
 #define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
 
 /* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
 #define    MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* NIC - Host System Clock Synchronization status */
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
 /* enum: Host System clock and NIC clock are not in sync */
 #define          MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
 /* enum: Host System clock and NIC clock are synchronized */
  * no longer in sync.
  */
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
 
 /* MC_CMD_PTP_OUT msgresponse */
 #define    MC_CMD_PTP_OUT_LEN 0
 #define    MC_CMD_PTP_OUT_TRANSMIT_LEN 8
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
 
 /* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
 #define    MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
 #define    MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define    MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32bits of major timestamp value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
 
 /* MC_CMD_PTP_OUT_STATUS msgresponse */
 #define    MC_CMD_PTP_OUT_STATUS_LEN 64
 /* Frequency of NIC's hardware clock */
 #define       MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define       MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
 /* Number of packets transmitted and timestamped */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
 /* Number of packets received and timestamped */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define       MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
 /* Number of packets timestamped by the FPGA */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
 /* Number of packets filter matched */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define       MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
 /* Number of packets not filter matched */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define       MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
 /* Number of PPS overflows (noise on input?) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
 /* Number of PPS bad periods */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
 /* Minimum period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
 /* Maximum period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
 /* Last period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
 /* Mean period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
 /* Minimum offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
 /* Maximum offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
 /* Last offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
 /* Mean offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
 
 /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
 #define    MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
 /* Host time immediately before NIC's hardware clock read */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
 /* Host time immediately after NIC's hardware clock read */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
 /* Number of nanoseconds waited after reading NIC's hardware clock */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
 
 /* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
 /* enum: Successful test */
 #define          MC_CMD_PTP_MANF_SUCCESS 0x0
 /* enum: FPGA load failed */
 #define          MC_CMD_PTP_MANF_CLOCK_READ 0xe
 /* Presence of external oscillator */
 #define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
 
 /* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
 /* Number of packets received by FPGA */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
 /* Number of packets received by Siena filters */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
 
 /* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
 #define    MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
 /* Time format required/used by this NIC. Applies to all PTP MCDI
  * operations that pass times between the host and firmware. If this operation
  * is not supported (older firmware) a format of seconds and nanoseconds should
- * be assumed.
+ * be assumed. Note this enum is deprecated. Do not add to it - use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
  */
 #define       MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
 /* enum: Times are in seconds and nanoseconds */
 #define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
 /* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
  * be assumed.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
 /* enum: Times are in seconds and nanoseconds */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
 /* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
 /* enum: Major register has units of seconds, minor 2^-27s per tick */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
 /* Minimum acceptable value for a corrected synchronization timeset. When
  * comparing host and NIC clock times, the MC returns a set of samples that
  * contain the host start and end time, the MC time when the host start was
  * end and start times minus the time that the MC waited for host end.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
 /* Various PTP capabilities */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
 /* Uncorrected error on PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
 /* Uncorrected error on PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
 /* Uncorrected error on PPS output in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
 /* Uncorrected error on PPS input in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
 /* Uncorrected error on PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
 /* Uncorrected error on PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
 /* Uncorrected error on PPS output in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
 /* Uncorrected error on PPS input in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
 /* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
 /* Uncorrected error on non-PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
 
 /* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
 
  */
 #define MC_CMD_CSR_READ32 0xc
 
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CSR_READ32_IN msgrequest */
 #define    MC_CMD_CSR_READ32_IN_LEN 12
 /* Address */
 #define       MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_CSR_READ32_IN_ADDR_LEN 4
 #define       MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define       MC_CMD_CSR_READ32_IN_STEP_LEN 4
 #define       MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define       MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
 
 /* MC_CMD_CSR_READ32_OUT msgresponse */
 #define    MC_CMD_CSR_READ32_OUT_LENMIN 4
  */
 #define MC_CMD_CSR_WRITE32 0xd
 
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CSR_WRITE32_IN msgrequest */
 #define    MC_CMD_CSR_WRITE32_IN_LENMIN 12
 #define    MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
 /* Address */
 #define       MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define       MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
 /* MC_CMD_CSR_WRITE32_OUT msgresponse */
 #define    MC_CMD_CSR_WRITE32_OUT_LEN 4
 #define       MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define       MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
 
 
 /***********************************/
  * sensors.
  */
 #define       MC_CMD_HP_IN_SUBCMD_OFST 0
+#define       MC_CMD_HP_IN_SUBCMD_LEN 4
 /* enum: OCSD (Option Card Sensor Data) sub-command. */
 #define          MC_CMD_HP_IN_OCSD_SUBCMD 0x0
 /* enum: Last known valid HP sub-command. */
  * NULL.)
  */
 #define       MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define       MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
 
 /* MC_CMD_HP_OUT msgresponse */
 #define    MC_CMD_HP_OUT_LEN 4
 #define       MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define       MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
 /* enum: OCSD stopped for this card. */
 #define          MC_CMD_HP_OUT_OCSD_STOPPED 0x1
 /* enum: OCSD was successfully started with the address provided. */
  * external devices.
  */
 #define       MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define       MC_CMD_MDIO_READ_IN_BUS_LEN 4
 /* enum: Internal. */
 #define          MC_CMD_MDIO_BUS_INTERNAL 0x0
 /* enum: External. */
 #define          MC_CMD_MDIO_BUS_EXTERNAL 0x1
 /* Port address */
 #define       MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define       MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
 /* Device Address or clause 22. */
 #define       MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define       MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
 /* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
  * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
  */
 #define          MC_CMD_MDIO_CLAUSE22 0x20
 /* Address */
 #define       MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define       MC_CMD_MDIO_READ_IN_ADDR_LEN 4
 
 /* MC_CMD_MDIO_READ_OUT msgresponse */
 #define    MC_CMD_MDIO_READ_OUT_LEN 8
 /* Value */
 #define       MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define       MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
 /* Status: the MDIO commands return the raw status bits from the MDIO block. A
  * "good" transaction should have the DONE bit set and all other bits clear.
  */
 #define       MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define       MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
 /* enum: Good. */
 #define          MC_CMD_MDIO_STATUS_GOOD 0x8
 
  * external devices.
  */
 #define       MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define       MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
 /* enum: Internal. */
 /*               MC_CMD_MDIO_BUS_INTERNAL 0x0 */
 /* enum: External. */
 /*               MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
 /* Port address */
 #define       MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define       MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
 /* Device Address or clause 22. */
 #define       MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define       MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
 /* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
  * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
  */
 /*               MC_CMD_MDIO_CLAUSE22 0x20 */
 /* Address */
 #define       MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define       MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define       MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
 
 /* MC_CMD_MDIO_WRITE_OUT msgresponse */
 #define    MC_CMD_MDIO_WRITE_OUT_LEN 4
  * "good" transaction should have the DONE bit set and all other bits clear.
  */
 #define       MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define       MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
 /* enum: Good. */
 /*               MC_CMD_MDIO_STATUS_GOOD 0x8 */
 
  */
 #define MC_CMD_DBI_WRITE 0x12
 
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DBI_WRITE_IN msgrequest */
 #define    MC_CMD_DBI_WRITE_IN_LENMIN 12
 /* MC_CMD_DBIWROP_TYPEDEF structuredef */
 #define    MC_CMD_DBIWROP_TYPEDEF_LEN 12
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define       MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
 
 #define    MC_CMD_PORT_READ32_IN_LEN 4
 /* Address */
 #define       MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_READ32_IN_ADDR_LEN 4
 
 /* MC_CMD_PORT_READ32_OUT msgresponse */
 #define    MC_CMD_PORT_READ32_OUT_LEN 8
 /* Value */
 #define       MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define       MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
 /* Status */
 #define       MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define       MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_PORT_WRITE32_IN_LEN 8
 /* Address */
 #define       MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define       MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
 
 /* MC_CMD_PORT_WRITE32_OUT msgresponse */
 #define    MC_CMD_PORT_WRITE32_OUT_LEN 4
 /* Status */
 #define       MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define       MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_PORT_READ128_IN_LEN 4
 /* Address */
 #define       MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_READ128_IN_ADDR_LEN 4
 
 /* MC_CMD_PORT_READ128_OUT msgresponse */
 #define    MC_CMD_PORT_READ128_OUT_LEN 20
 #define       MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
 /* Status */
 #define       MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define       MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_PORT_WRITE128_IN_LEN 20
 /* Address */
 #define       MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
 #define       MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
 #define    MC_CMD_PORT_WRITE128_OUT_LEN 4
 /* Status */
 #define       MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define       MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
 
 /* MC_CMD_CAPABILITIES structuredef */
 #define    MC_CMD_CAPABILITIES_LEN 4
 #define    MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
 #define    MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
-/* See MC_CMD_CAPABILITIES */
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-/* See MC_CMD_CAPABILITIES */
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
-/* This field contains a 16-bit value for each of the types of NVRAM area. The
- * values are defined in the firmware/mc/platform/.c file for a specific board
- * type, but otherwise have no meaning to the MC; they are used by the driver
- * to manage selection of appropriate firmware updates.
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
  */
 #define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
 #define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
  */
 #define MC_CMD_DBI_READX 0x19
 
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DBI_READX_IN msgrequest */
 #define    MC_CMD_DBI_READX_IN_LENMIN 8
 /* MC_CMD_DBIRDOP_TYPEDEF structuredef */
 #define    MC_CMD_DBIRDOP_TYPEDEF_LEN 8
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
 #define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
  */
 #define MC_CMD_SET_RAND_SEED 0x1a
 
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_RAND_SEED_IN msgrequest */
 #define    MC_CMD_SET_RAND_SEED_IN_LEN 16
 #define    MC_CMD_DRV_ATTACH_IN_LEN 12
 /* new state to set if UPDATE=1 */
 #define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
 #define        MC_CMD_DRV_ATTACH_LBN 0
 #define        MC_CMD_DRV_ATTACH_WIDTH 1
 #define        MC_CMD_DRV_PREBOOT_LBN 1
 #define        MC_CMD_DRV_PREBOOT_WIDTH 1
 /* 1 to set new state, or 0 to just report the existing state */
 #define       MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define       MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
 /* preferred datapath firmware (for Huntington; ignored for Siena) */
 #define       MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define       MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
 /* enum: Prefer to use full featured firmware */
 #define          MC_CMD_FW_FULL_FEATURED 0x0
 /* enum: Prefer to use firmware with fewer features but lower latency */
 #define    MC_CMD_DRV_ATTACH_OUT_LEN 4
 /* previous or existing state, see the bitmask at NEW_STATE */
 #define       MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
 
 /* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
 #define    MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
 /* previous or existing state, see the bitmask at NEW_STATE */
 #define       MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
 /* Flags associated with this function */
 #define       MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
 /* enum: Labels the lowest-numbered function visible to the OS */
 #define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
 /* enum: The function can control the link state of the physical port it is
 #define    MC_CMD_SHMUART_IN_LEN 4
 /* ??? */
 #define       MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define       MC_CMD_SHMUART_IN_FLAG_LEN 4
 
 /* MC_CMD_SHMUART_OUT msgresponse */
 #define    MC_CMD_SHMUART_OUT_LEN 0
  * (TBD).
  */
 #define       MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define       MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
 #define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
 #define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
 
 #define    MC_CMD_PCIE_CREDITS_IN_LEN 8
 /* poll period. 0 is disabled */
 #define       MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define       MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
 /* wipe statistics */
 #define       MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define       MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
 
 /* MC_CMD_PCIE_CREDITS_OUT msgresponse */
 #define    MC_CMD_PCIE_CREDITS_OUT_LEN 16
 /* MC_CMD_RXD_MONITOR_IN msgrequest */
 #define    MC_CMD_RXD_MONITOR_IN_LEN 12
 #define       MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define       MC_CMD_RXD_MONITOR_IN_QID_LEN 4
 #define       MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define       MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
 #define       MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define       MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
 
 /* MC_CMD_RXD_MONITOR_OUT msgresponse */
 #define    MC_CMD_RXD_MONITOR_OUT_LEN 80
 #define       MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define       MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define       MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_PUTS 0x23
 
-#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_PUTS_IN msgrequest */
 #define    MC_CMD_PUTS_IN_LENMIN 13
 #define    MC_CMD_PUTS_IN_LENMAX 252
 #define    MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_PUTS_IN_DEST_OFST 0
+#define       MC_CMD_PUTS_IN_DEST_LEN 4
 #define        MC_CMD_PUTS_IN_UART_LBN 0
 #define        MC_CMD_PUTS_IN_UART_WIDTH 1
 #define        MC_CMD_PUTS_IN_PORT_LBN 1
 #define    MC_CMD_GET_PHY_CFG_OUT_LEN 72
 /* flags */
 #define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
 #define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define       MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
 /* Bitmask of supported capabilities */
 #define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
 #define        MC_CMD_PHY_CAP_10HDX_LBN 1
 #define        MC_CMD_PHY_CAP_10HDX_WIDTH 1
 #define        MC_CMD_PHY_CAP_10FDX_LBN 2
 #define        MC_CMD_PHY_CAP_40000FDX_WIDTH 1
 #define        MC_CMD_PHY_CAP_DDM_LBN 12
 #define        MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define        MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define        MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define        MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define        MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define        MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define        MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define       MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define       MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define       MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
 #define       MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define       MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
 /* enum: Xaui. */
 #define          MC_CMD_MEDIA_XAUI 0x1
 /* enum: CX4. */
 /* enum: QSFP+. */
 #define          MC_CMD_MEDIA_QSFP_PLUS 0x7
 #define       MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define       MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
 /* enum: Native clause 22 */
 #define          MC_CMD_MMD_CLAUSE22 0x0
 #define          MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
 #define    MC_CMD_START_BIST_IN_LEN 4
 /* Type of test. */
 #define       MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define       MC_CMD_START_BIST_IN_TYPE_LEN 4
 /* enum: Run the PHY's short cable BIST. */
 #define          MC_CMD_PHY_BIST_CABLE_SHORT 0x1
 /* enum: Run the PHY's long cable BIST. */
 #define    MC_CMD_POLL_BIST_OUT_LEN 8
 /* result */
 #define       MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define       MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
 /* enum: Running. */
 #define          MC_CMD_POLL_BIST_RUNNING 0x1
 /* enum: Passed. */
 /* enum: Timed-out. */
 #define          MC_CMD_POLL_BIST_TIMEOUT 0x4
 #define       MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
 
 /* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
 #define    MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
 /* Status of each channel A */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
 /* enum: Ok. */
 #define          MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
 /* enum: Open. */
 #define          MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
 /* Status of each channel B */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 /* Status of each channel C */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 /* Status of each channel D */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 
 #define    MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
 /* enum: Complete. */
 #define          MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
 /* enum: Bus switch off I2C write. */
 #define    MC_CMD_POLL_BIST_OUT_MEM_LEN 36
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
 /* enum: Test has completed. */
 #define          MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
 /* enum: RAM test - walk ones. */
 #define          MC_CMD_POLL_BIST_MEM_ECC 0x6
 /* Failure address, only valid if result is POLL_BIST_FAILED */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define       MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
 /* Bus or address space to which the failure address corresponds */
 #define       MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define       MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
 /* enum: MC MIPS bus. */
 #define          MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
 /* enum: CSR IREG bus. */
 #define          MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
 /* Pattern written to RAM / register */
 #define       MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define       MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
 /* Actual value read from RAM / register */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define       MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
 /* ECC error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
 /* ECC parity error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
 /* ECC fatal error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
 
 
 /***********************************/
 /*            Enum values, see field(s): */
 /*               100M */
 
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define    MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/*               MC_CMD_LOOPBACK_NONE  0x0 */
+/* enum: Data. */
+/*               MC_CMD_LOOPBACK_DATA  0x1 */
+/* enum: GMAC. */
+/*               MC_CMD_LOOPBACK_GMAC  0x2 */
+/* enum: XGMII. */
+/*               MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/*               MC_CMD_LOOPBACK_XGXS  0x4 */
+/* enum: XAUI. */
+/*               MC_CMD_LOOPBACK_XAUI  0x5 */
+/* enum: GMII. */
+/*               MC_CMD_LOOPBACK_GMII  0x6 */
+/* enum: SGMII. */
+/*               MC_CMD_LOOPBACK_SGMII  0x7 */
+/* enum: XGBR. */
+/*               MC_CMD_LOOPBACK_XGBR  0x8 */
+/* enum: XFI. */
+/*               MC_CMD_LOOPBACK_XFI  0x9 */
+/* enum: XAUI Far. */
+/*               MC_CMD_LOOPBACK_XAUI_FAR  0xa */
+/* enum: GMII Far. */
+/*               MC_CMD_LOOPBACK_GMII_FAR  0xb */
+/* enum: SGMII Far. */
+/*               MC_CMD_LOOPBACK_SGMII_FAR  0xc */
+/* enum: XFI Far. */
+/*               MC_CMD_LOOPBACK_XFI_FAR  0xd */
+/* enum: GPhy. */
+/*               MC_CMD_LOOPBACK_GPHY  0xe */
+/* enum: PhyXS. */
+/*               MC_CMD_LOOPBACK_PHYXS  0xf */
+/* enum: PCS. */
+/*               MC_CMD_LOOPBACK_PCS  0x10 */
+/* enum: PMA-PMD. */
+/*               MC_CMD_LOOPBACK_PMAPMD  0x11 */
+/* enum: Cross-Port. */
+/*               MC_CMD_LOOPBACK_XPORT  0x12 */
+/* enum: XGMII-Wireside. */
+/*               MC_CMD_LOOPBACK_XGMII_WS  0x13 */
+/* enum: XAUI Wireside. */
+/*               MC_CMD_LOOPBACK_XAUI_WS  0x14 */
+/* enum: XAUI Wireside Far. */
+/*               MC_CMD_LOOPBACK_XAUI_WS_FAR  0x15 */
+/* enum: XAUI Wireside near. */
+/*               MC_CMD_LOOPBACK_XAUI_WS_NEAR  0x16 */
+/* enum: GMII Wireside. */
+/*               MC_CMD_LOOPBACK_GMII_WS  0x17 */
+/* enum: XFI Wireside. */
+/*               MC_CMD_LOOPBACK_XFI_WS  0x18 */
+/* enum: XFI Wireside Far. */
+/*               MC_CMD_LOOPBACK_XFI_WS_FAR  0x19 */
+/* enum: PhyXS Wireside. */
+/*               MC_CMD_LOOPBACK_PHYXS_WS  0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/*               MC_CMD_LOOPBACK_PMA_INT  0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/*               MC_CMD_LOOPBACK_SD_NEAR  0x1c */
+/* enum: KR Serdes Serial. */
+/*               MC_CMD_LOOPBACK_SD_FAR  0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/*               MC_CMD_LOOPBACK_PMA_INT_WS  0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/*               MC_CMD_LOOPBACK_SD_FEP2_WS  0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/*               MC_CMD_LOOPBACK_SD_FEP1_5_WS  0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/*               MC_CMD_LOOPBACK_SD_FEP_WS  0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/*               MC_CMD_LOOPBACK_SD_FES_WS  0x22 */
+/* enum: Near side of AOE Siena side port */
+/*               MC_CMD_LOOPBACK_AOE_INT_NEAR  0x23 */
+/* enum: Medford Wireside datapath loopback */
+/*               MC_CMD_LOOPBACK_DATA_WS  0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/*               MC_CMD_LOOPBACK_FORCE_EXT_LINK  0x25 */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 25G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 50G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 100G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/*            Enum values, see field(s): */
+/*               100M */
+
 
 /***********************************/
 /* MC_CMD_GET_LINK
 #define    MC_CMD_GET_LINK_OUT_LEN 28
 /* near-side advertised capabilities */
 #define       MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define       MC_CMD_GET_LINK_OUT_CAP_LEN 4
 /* link-partner advertised capabilities */
 #define       MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define       MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
 /* Autonegotiated speed in mbit/s. The link may still be down even if this
  * reads non-zero.
  */
 #define       MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define       MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
 /* Current loopback setting. */
 #define       MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define       MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
 #define       MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define       MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
 #define        MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
 #define        MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
 /* This returns the negotiated flow control value. */
 #define       MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define       MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
 #define       MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define       MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
 #define        MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
 #define    MC_CMD_SET_LINK_IN_LEN 16
 /* ??? */
 #define       MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define       MC_CMD_SET_LINK_IN_CAP_LEN 4
 /* Flags */
 #define       MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define       MC_CMD_SET_LINK_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
 #define        MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
 #define        MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
 #define        MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
 /* Loopback mode. */
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
 /* A loopback speed of "0" is supported, and means (choose any available
  * speed).
  */
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
 
 /* MC_CMD_SET_LINK_OUT msgresponse */
 #define    MC_CMD_SET_LINK_OUT_LEN 0
 #define    MC_CMD_SET_ID_LED_IN_LEN 4
 /* Set LED state. */
 #define       MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define       MC_CMD_SET_ID_LED_IN_STATE_LEN 4
 #define          MC_CMD_LED_OFF  0x0 /* enum */
 #define          MC_CMD_LED_ON  0x1 /* enum */
 #define          MC_CMD_LED_DEFAULT  0x2 /* enum */
  * EtherII, VLAN, bug16011 padding).
  */
 #define       MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define       MC_CMD_SET_MAC_IN_MTU_LEN 4
 #define       MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define       MC_CMD_SET_MAC_IN_DRAIN_LEN 4
 #define       MC_CMD_SET_MAC_IN_ADDR_OFST 8
 #define       MC_CMD_SET_MAC_IN_ADDR_LEN 8
 #define       MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
 #define       MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
 #define       MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define       MC_CMD_SET_MAC_IN_REJECT_LEN 4
 #define        MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
 #define        MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define       MC_CMD_SET_MAC_IN_FCNTL_LEN 4
 /* enum: Flow control is off. */
 #define          MC_CMD_FCNTL_OFF 0x0
 /* enum: Respond to flow control. */
 /* enum: Issue flow control. */
 #define          MC_CMD_FCNTL_GENERATE 0x5
 #define       MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define       MC_CMD_SET_MAC_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
 #define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
 
  * EtherII, VLAN, bug16011 padding).
  */
 #define       MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define       MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
 #define       MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define       MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
 #define       MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define       MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define       MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
 /* enum: Flow control is off. */
 /*               MC_CMD_FCNTL_OFF 0x0 */
 /* enum: Respond to flow control. */
 /* enum: Issue flow control. */
 /*               MC_CMD_FCNTL_GENERATE 0x5 */
 #define       MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define       MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
 /* Select which parameters to configure. A parameter will only be modified if
  * set).
  */
 #define       MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define       MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
  * to 0.
  */
 #define       MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define       MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
 
 
 /***********************************/
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
 #define       MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define       MC_CMD_MAC_STATS_IN_CMD_LEN 4
 #define        MC_CMD_MAC_STATS_IN_DMA_LBN 0
 #define        MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
 #define        MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
 #define       MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define       MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
 /* port id so vadapter stats can be provided */
 #define       MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define       MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
 
 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
 #define    MC_CMD_MAC_STATS_OUT_DMA_LEN 0
 #define          MC_CMD_GMAC_DMABUF_START  0x40
 /* enum: End of GMAC stats buffer space, for Siena only. */
 #define          MC_CMD_GMAC_DMABUF_END    0x5f
-#define          MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define          MC_CMD_MAC_GENERATION_END 0x60
 #define          MC_CMD_MAC_NSTATS  0x61 /* enum */
 
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define          MC_CMD_MAC_FEC_DMABUF_START  0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define          MC_CMD_MAC_FEC_UNCORRECTED_ERRORS  0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define          MC_CMD_MAC_FEC_CORRECTED_ERRORS  0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0  0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1  0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2  0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3  0x66
+/* enum: This includes the space at offset 103 which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V2  0x68
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define          MC_CMD_MAC_CTPIO_DMABUF_START  0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define          MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK  0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define          MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS  0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define          MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL  0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define          MC_CMD_MAC_CTPIO_OVERFLOW_FAIL  0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define          MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL  0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define          MC_CMD_MAC_CTPIO_TIMEOUT_FAIL  0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define          MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL  0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define          MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL  0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define          MC_CMD_MAC_CTPIO_INVALID_WR_FAIL  0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define          MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK  0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define          MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK  0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define          MC_CMD_MAC_CTPIO_RUNT_FALLBACK  0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define          MC_CMD_MAC_CTPIO_SUCCESS  0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define          MC_CMD_MAC_CTPIO_FALLBACK  0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define          MC_CMD_MAC_CTPIO_POISON  0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define          MC_CMD_MAC_CTPIO_ERASE  0x77
+/* enum: This includes the space at offset 120 which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V3  0x79
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
 
 /***********************************/
 /* MC_CMD_SRIOV
 /* MC_CMD_SRIOV_IN msgrequest */
 #define    MC_CMD_SRIOV_IN_LEN 12
 #define       MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define       MC_CMD_SRIOV_IN_ENABLE_LEN 4
 #define       MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define       MC_CMD_SRIOV_IN_VI_BASE_LEN 4
 #define       MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define       MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
 
 /* MC_CMD_SRIOV_OUT msgresponse */
 #define    MC_CMD_SRIOV_OUT_LEN 8
 #define       MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define       MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
 #define       MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define       MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
 
 /* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
 #define    MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
 /* this is only used for the first record */
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
 #define          MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
 
 /* MC_CMD_WOL_FILTER_SET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_LEN 192
 #define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
 #define          MC_CMD_FILTER_MODE_SIMPLE    0x0 /* enum */
 #define          MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
 /* A type value of 1 is unused. */
 #define       MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define       MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
 /* enum: Magic */
 #define          MC_CMD_WOL_TYPE_MAGIC      0x0
 /* enum: MS Windows Magic */
 /* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
 /* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
 /* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
 /* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
 /* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
 /* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_SET_OUT_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
 
 
 /***********************************/
 /* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
 
 /* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
 /* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_RESET_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define       MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
 #define          MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
 #define          MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
 
 #define    MC_CMD_NVRAM_TYPES_OUT_LEN 4
 /* Bit mask of supported types. */
 #define       MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define       MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
 /* enum: Disabled callisto. */
 #define          MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
 /* enum: MC firmware. */
 /* MC_CMD_NVRAM_INFO_IN msgrequest */
 #define    MC_CMD_NVRAM_INFO_IN_LEN 4
 #define       MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
 /* MC_CMD_NVRAM_INFO_OUT msgresponse */
 #define    MC_CMD_NVRAM_INFO_OUT_LEN 24
 #define       MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define       MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define       MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define       MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
 #define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
 #define        MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
 #define        MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
 #define        MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
 #define       MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
 
 /* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
 #define    MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
 #define       MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define       MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define       MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
 #define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
 #define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
 #define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
 /* Writes must be multiples of this size. Added to support the MUM on Sorrento.
  */
 #define       MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define       MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
 
 
 /***********************************/
  */
 #define    MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
 #define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
  */
 #define    MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
 #define       MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
 #define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
 
 /* MC_CMD_NVRAM_READ_IN msgrequest */
 #define    MC_CMD_NVRAM_READ_IN_LEN 12
 #define       MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
 /* amount to read in bytes */
 #define       MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
 
 /* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
 #define    MC_CMD_NVRAM_READ_IN_V2_LEN 16
 #define       MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define       MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
 /* amount to read in bytes */
 #define       MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
 /* Optional control info. If a partition is stored with an A/B versioning
  * scheme (i.e. in more than one physical partition in NVRAM) the host can set
  * this to control which underlying physical partition is used to read data
  * verifying by reading with MODE=TARGET_BACKUP.
  */
 #define       MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define       MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
 /* enum: Same as omitting MODE: caller sees data in current partition unless it
  * holds the write lock in which case it sees data in the partition it is
  * updating.
 #define    MC_CMD_NVRAM_WRITE_IN_LENMAX 252
 #define    MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
 #define       MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
 /* MC_CMD_NVRAM_ERASE_IN msgrequest */
 #define    MC_CMD_NVRAM_ERASE_IN_LEN 12
 #define       MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
 #define       MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
 
 /* MC_CMD_NVRAM_ERASE_OUT msgresponse */
 #define    MC_CMD_NVRAM_ERASE_OUT_LEN 0
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
 #define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
 
 /* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
  * request with additional flags indicating version of NVRAM_UPDATE commands in
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
 #define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
 
  * This process takes a few seconds to complete. So is likely to take more than
  * the MCDI timeout. Hence signature verification is initiated when
  * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, however, the
- * MCDI command returns immediately with error code EAGAIN. Subsequent
- * NVRAM_UPDATE_FINISH_V2_IN requests also return EAGAIN if the verification is
- * in progress. Once the verification has completed, this response payload
- * includes the results of the signature verification. Note that the nvram lock
- * in firmware is only released after the verification has completed and the
- * host has read back the result code from firmware.
+ * MCDI command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
 /* Result of nvram update completion processing */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
 /* enum: Verify succeeded without any errors. */
 #define          MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
 /* enum: CMS format verification failed due to an internal error. */
  * Trusted approver's list.
  */
 #define          MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define          MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
 
 
 /***********************************/
 /* MC_CMD_REBOOT_IN msgrequest */
 #define    MC_CMD_REBOOT_IN_LEN 4
 #define       MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define       MC_CMD_REBOOT_IN_FLAGS_LEN 4
 #define          MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
 
 /* MC_CMD_REBOOT_OUT msgresponse */
  */
 #define MC_CMD_REBOOT_MODE 0x3f
 
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_REBOOT_MODE_IN msgrequest */
 #define    MC_CMD_REBOOT_MODE_IN_LEN 4
 #define       MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define       MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
 /* enum: Normal. */
 #define          MC_CMD_REBOOT_MODE_NORMAL 0x0
 /* enum: Power-on Reset. */
 /* MC_CMD_REBOOT_MODE_OUT msgresponse */
 #define    MC_CMD_REBOOT_MODE_OUT_LEN 4
 #define       MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define       MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_SENSOR_INFO 0x41
 
-#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_SENSOR_INFO_IN msgrequest */
 #define    MC_CMD_SENSOR_INFO_IN_LEN 0
  * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
  */
 #define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
 
 /* MC_CMD_SENSOR_INFO_OUT msgresponse */
 #define    MC_CMD_SENSOR_INFO_OUT_LENMIN 4
 #define    MC_CMD_SENSOR_INFO_OUT_LENMAX 252
 #define    MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
 #define       MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define       MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
 /* enum: Controller temperature: degC */
 #define          MC_CMD_SENSOR_CONTROLLER_TEMP  0x0
 /* enum: Phy common temperature: degC */
 #define          MC_CMD_SENSOR_BOARD_FRONT_TEMP  0x4f
 /* enum: Board temperature (back): degC */
 #define          MC_CMD_SENSOR_BOARD_BACK_TEMP  0x50
+/* enum: 1.8v power current: mA */
+#define          MC_CMD_SENSOR_IN_I1V8  0x51
+/* enum: 2.5v power current: mA */
+#define          MC_CMD_SENSOR_IN_I2V5  0x52
+/* enum: 3.3v power current: mA */
+#define          MC_CMD_SENSOR_IN_I3V3  0x53
+/* enum: 12v power current: mA */
+#define          MC_CMD_SENSOR_IN_I12V0  0x54
+/* enum: 1.3v power: mV */
+#define          MC_CMD_SENSOR_IN_1V3  0x55
+/* enum: 1.3v power current: mA */
+#define          MC_CMD_SENSOR_IN_I1V3  0x56
+/* enum: Not a sensor: reserved for the next page flag */
+#define          MC_CMD_SENSOR_PAGE2_NEXT  0x5f
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 #define       MC_CMD_SENSOR_ENTRY_OFST 4
 #define       MC_CMD_SENSOR_ENTRY_LEN 8
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
 #define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SENSOR_INFO_OUT */
 #define        MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
  */
 #define MC_CMD_READ_SENSORS 0x42
 
-#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_READ_SENSORS_IN msgrequest */
 #define    MC_CMD_READ_SENSORS_IN_LEN 8
 #define       MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
 /* Size in bytes of host buffer. */
 #define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
 
 /* MC_CMD_READ_SENSORS_OUT msgresponse */
 #define    MC_CMD_READ_SENSORS_OUT_LEN 0
 /* MC_CMD_GET_PHY_STATE_OUT msgresponse */
 #define    MC_CMD_GET_PHY_STATE_OUT_LEN 4
 #define       MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
 /* enum: Ok. */
 #define          MC_CMD_PHY_STATE_OK 0x1
 /* enum: Faulty. */
 /* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_GET_OUT_LEN 4
 #define       MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
 #define          MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
 #define          MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS  0x2 /* enum */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
 /*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
 
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
 /*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
 
 
 /***********************************/
 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
 #define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
 #define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
 #define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
 
 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
 #define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
 #define    MC_CMD_TESTASSERT_V2_IN_LEN 4
 /* How to provoke the assertion */
 #define       MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define       MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
 /* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
  * you're testing firmware, this is what you want.
  */
 #define    MC_CMD_WORKAROUND_IN_LEN 8
 /* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
 #define       MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define       MC_CMD_WORKAROUND_IN_TYPE_LEN 4
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_WORKAROUND_BUG17230 0x1
 /* enum: Bug 35388 work around (unsafe EVQ writes). */
  * the workaround
  */
 #define       MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define       MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
 
 /* MC_CMD_WORKAROUND_OUT msgresponse */
 #define    MC_CMD_WORKAROUND_OUT_LEN 0
  */
 #define    MC_CMD_WORKAROUND_EXT_OUT_LEN 4
 #define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
 #define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
 #define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
 
 /* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
 
 /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
 /* in bytes */
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
 /* MC_CMD_NVRAM_TEST_IN msgrequest */
 #define    MC_CMD_NVRAM_TEST_IN_LEN 4
 #define       MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
 /* MC_CMD_NVRAM_TEST_OUT msgresponse */
 #define    MC_CMD_NVRAM_TEST_OUT_LEN 4
 #define       MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define       MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
 /* enum: Passed. */
 #define          MC_CMD_NVRAM_TEST_PASS 0x0
 /* enum: Failed. */
 #define    MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
 /* 0-6 low->high de-emph. */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
 /* 0-8 low->high ref.V */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
 /* 0-8 0-8 low->high boost */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
 /* 0-8 low->high ref.V */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
 
 /* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
 #define    MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
 #define    MC_CMD_MRSFP_TWEAK_OUT_LEN 12
 /* input bits */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
 /* output bits */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
 /* direction */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
 /* enum: Out. */
 #define          MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
 /* enum: In. */
  */
 #define MC_CMD_SENSOR_SET_LIMS 0x4e
 
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
 #define    MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
 #define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
 
 /* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
 #define    MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
 /* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
 #define    MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
 /* total number of partitions */
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
 /* type ID code for each of NUM_PARTITIONS partitions */
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
 #define    MC_CMD_NVRAM_METADATA_IN_LEN 4
 /* Partition type ID code */
 #define       MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
 
 /* MC_CMD_NVRAM_METADATA_OUT msgresponse */
 #define    MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
 #define    MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
 /* Partition type ID code */
 #define       MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
 #define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
 #define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
 #define        MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
 #define        MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
 /* Subtype ID code for content of this partition */
 #define       MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define       MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
 /* 1st component of W.X.Y.Z version number for content of this partition */
 #define       MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
 #define       MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
 /* Number of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
 /* Spacing of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_CLP_IN_LEN 4
 /* Sub operation */
 #define       MC_CMD_CLP_IN_OP_OFST 0
+#define       MC_CMD_CLP_IN_OP_LEN 4
 /* enum: Return to factory default settings */
 #define          MC_CMD_CLP_OP_DEFAULT 0x1
 /* enum: Set MAC address */
 /* MC_CMD_CLP_IN_DEFAULT msgrequest */
 #define    MC_CMD_CLP_IN_DEFAULT_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_DEFAULT msgresponse */
 #define    MC_CMD_CLP_OUT_DEFAULT_LEN 0
 /* MC_CMD_CLP_IN_SET_MAC msgrequest */
 #define    MC_CMD_CLP_IN_SET_MAC_LEN 12
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 /* MAC address assigned to port */
 #define       MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
 #define       MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
 /* MC_CMD_CLP_IN_GET_MAC msgrequest */
 #define    MC_CMD_CLP_IN_GET_MAC_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_GET_MAC msgresponse */
 #define    MC_CMD_CLP_OUT_GET_MAC_LEN 8
 /* MC_CMD_CLP_IN_SET_BOOT msgrequest */
 #define    MC_CMD_CLP_IN_SET_BOOT_LEN 5
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 /* Boot flag */
 #define       MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
 #define       MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
 /* MC_CMD_CLP_IN_GET_BOOT msgrequest */
 #define    MC_CMD_CLP_IN_GET_BOOT_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
 #define    MC_CMD_CLP_OUT_GET_BOOT_LEN 4
  */
 #define MC_CMD_MUM 0x57
 
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_MUM_IN msgrequest */
 #define    MC_CMD_MUM_IN_LEN 4
 #define       MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define       MC_CMD_MUM_IN_OP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_OP_LBN 0
 #define        MC_CMD_MUM_IN_OP_WIDTH 8
 /* enum: NULL MCDI command to MUM */
 #define    MC_CMD_MUM_IN_NULL_LEN 4
 /* MUM cmd header */
 #define       MC_CMD_MUM_IN_CMD_OFST 0
+#define       MC_CMD_MUM_IN_CMD_LEN 4
 
 /* MC_CMD_MUM_IN_GET_VERSION msgrequest */
 #define    MC_CMD_MUM_IN_GET_VERSION_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_IN_READ msgrequest */
 #define    MC_CMD_MUM_IN_READ_LEN 16
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* ID of (device connected to MUM) to read from registers of */
 #define       MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define       MC_CMD_MUM_IN_READ_DEVICE_LEN 4
 /* enum: Hittite HMC1035 clock generator on Sorrento board */
 #define          MC_CMD_MUM_DEV_HITTITE 0x1
 /* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
 #define          MC_CMD_MUM_DEV_HITTITE_NIC 0x2
 /* 32-bit address to read from */
 #define       MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define       MC_CMD_MUM_IN_READ_ADDR_LEN 4
 /* Number of words to read. */
 #define       MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define       MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
 
 /* MC_CMD_MUM_IN_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_WRITE_LENMIN 16
 #define    MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* ID of (device connected to MUM) to write to registers of */
 #define       MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define       MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
 /* enum: Hittite HMC1035 clock generator on Sorrento board */
 /*               MC_CMD_MUM_DEV_HITTITE 0x1 */
 /* 32-bit address to write to */
 #define       MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define       MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
 /* Words to write */
 #define       MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
 #define       MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
 #define    MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* MUM I2C cmd code */
 #define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
 /* Number of bytes to write */
 #define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
 /* Number of bytes to read */
 #define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
 /* Bytes to write */
 #define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
 #define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
 #define    MC_CMD_MUM_IN_LOG_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define       MC_CMD_MUM_IN_LOG_OP_LEN 4
 #define          MC_CMD_MUM_IN_LOG_OP_UART  0x1 /* enum */
 
 /* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
 #define    MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /*            MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/*            MC_CMD_MUM_IN_LOG_OP_LEN 4 */
 /* Enable/disable debug output to UART */
 #define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
 #define        MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
 #define          MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
 /* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
 /* The first 32-bit word to be written to the GPIO OUT register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
 /* The second 32-bit word to be written to the GPIO OUT register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
 /* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
 /* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OP msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
 #define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
 #define          MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
 
 #define    MC_CMD_MUM_IN_READ_SENSORS_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
 #define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
 #define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
 #define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
 #define    MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* Bit-mask of clocks to be programmed */
 #define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
 #define          MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
 #define          MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
 #define          MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
 /* Control flags for clock programming */
 #define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
 #define    MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* Enable/Disable FPGA config from flash */
 #define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
 
 /* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
 #define    MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_IN_QSFP msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_LEN 12
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
 #define        MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
 #define          MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
 #define          MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
 #define          MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
 #define       MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_INIT_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
 #define    MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_OUT msgresponse */
 #define    MC_CMD_MUM_OUT_LEN 0
 /* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
 #define    MC_CMD_MUM_OUT_GET_VERSION_LEN 12
 #define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
 #define    MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
 /* The first 32-bit word read from the GPIO IN register. */
 #define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
 /* The second 32-bit word read from the GPIO IN register. */
 #define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
 #define    MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
 /* The first 32-bit word read from the GPIO OUT register. */
 #define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
 /* The second 32-bit word read from the GPIO OUT register. */
 #define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
 /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
 #define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
 #define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
 #define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
 /* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
 #define    MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
 #define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
 
 /* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
 #define    MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
 /* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
 #define    MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
 #define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
 /* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
 #define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
 /* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
 #define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
 /* in bytes */
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
 /* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
 #define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
 
 /* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
 #define    MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
 #define    MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
 /* Discrete (soldered) DDR resistor strap info */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
 /* Number of SODIMM info records */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
 /* Array of SODIMM info records */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
 /* EVB_PORT_ID structuredef */
 #define    EVB_PORT_ID_LEN 4
 #define       EVB_PORT_ID_PORT_ID_OFST 0
+#define       EVB_PORT_ID_PORT_ID_LEN 4
 /* enum: An invalid port handle. */
 #define          EVB_PORT_ID_NULL  0x0
 /* enum: The port assigned to this function. */
 #define          NVRAM_PARTITION_TYPE_FC_LOG               0xb04
 /* enum: MUM firmware partition */
 #define          NVRAM_PARTITION_TYPE_MUM_FIRMWARE         0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define          NVRAM_PARTITION_TYPE_SUC_FIRMWARE         0xc00
 /* enum: MUM Non-volatile log output partition. */
 #define          NVRAM_PARTITION_TYPE_MUM_LOG              0xc01
 /* enum: MUM Application table partition. */
 #define          NVRAM_PARTITION_TYPE_MUM_FUSELOCK         0xc06
 /* enum: UEFI expansion ROM if separate from PXE */
 #define          NVRAM_PARTITION_TYPE_EXPANSION_UEFI       0xd00
-/* enum: Spare partition 0 */
-#define          NVRAM_PARTITION_TYPE_SPARE_0              0x1000
+/* enum: Used by the expansion ROM for logging */
+#define          NVRAM_PARTITION_TYPE_PXE_LOG              0x1000
 /* enum: Used for XIP code of shmbooted images */
 #define          NVRAM_PARTITION_TYPE_XIP_SCRATCH          0x1100
 /* enum: Spare partition 2 */
 #define          NVRAM_PARTITION_TYPE_SPARE_4              0x1400
 /* enum: Spare partition 5 */
 #define          NVRAM_PARTITION_TYPE_SPARE_5              0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define          NVRAM_PARTITION_TYPE_STATUS               0x1600
+/* enum: Spare partition 13 */
+#define          NVRAM_PARTITION_TYPE_SPARE_13              0x1700
+/* enum: Spare partition 14 */
+#define          NVRAM_PARTITION_TYPE_SPARE_14              0x1800
+/* enum: Spare partition 15 */
+#define          NVRAM_PARTITION_TYPE_SPARE_15              0x1900
+/* enum: Spare partition 16 */
+#define          NVRAM_PARTITION_TYPE_SPARE_16              0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define          NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS    0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define          NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS    0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define          NVRAM_PARTITION_TYPE_FRU_INFORMATION       0x1d00
 /* enum: Start of reserved value range (firmware may use for any purpose) */
 #define          NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN  0xff00
 /* enum: End of reserved value range (firmware may use for any purpose) */
 /* LICENSED_APP_ID structuredef */
 #define    LICENSED_APP_ID_LEN 4
 #define       LICENSED_APP_ID_ID_OFST 0
+#define       LICENSED_APP_ID_ID_LEN 4
 /* enum: OpenOnload */
 #define          LICENSED_APP_ID_ONLOAD                  0x1
 /* enum: PTP timestamping */
 #define          LICENSED_APP_ID_SOLARCAPTURE_TAP        0x400
 /* enum: Capture SolarSystem 40G */
 #define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G  0x1000
+/* enum: ScaleOut Onload */
+#define          LICENSED_APP_ID_SCALEOUT_ONLOAD         0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define          LICENSED_APP_ID_DSHBRD                  0x4000
+/* enum: SolarCapture Trading Analytics */
+#define          LICENSED_APP_ID_SCATRD                  0x8000
 #define       LICENSED_APP_ID_ID_LBN 0
 #define       LICENSED_APP_ID_ID_WIDTH 32
 
 #define        LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define        LICENSED_V3_APPS_DSHBRD_LBN 14
+#define        LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define        LICENSED_V3_APPS_SCATRD_LBN 15
+#define        LICENSED_V3_APPS_SCATRD_WIDTH 1
 #define       LICENSED_V3_APPS_MASK_LBN 0
 #define       LICENSED_V3_APPS_MASK_WIDTH 64
 
 #define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
 /* enum: This is a TX completion event, not a timestamp */
 #define          TX_TIMESTAMP_EVENT_TX_EV_COMPLETION  0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION  0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO  0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI  0x13
 /* enum: This is the low part of a TX timestamp event */
 #define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO  0x51
 /* enum: This is the high part of a TX timestamp event */
 #define       RSS_MODE_HASH_SELECTOR_LBN 0
 #define       RSS_MODE_HASH_SELECTOR_WIDTH 8
 
+/* CTPIO_STATS_MAP structuredef */
+#define    CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define       CTPIO_STATS_MAP_VI_OFST 0
+#define       CTPIO_STATS_MAP_VI_LEN 2
+#define       CTPIO_STATS_MAP_VI_LBN 0
+#define       CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define       CTPIO_STATS_MAP_BUCKET_OFST 2
+#define       CTPIO_STATS_MAP_BUCKET_LEN 2
+#define       CTPIO_STATS_MAP_BUCKET_LBN 16
+#define       CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
 
 /***********************************/
 /* MC_CMD_READ_REGS
  */
 #define MC_CMD_READ_REGS 0x50
 
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_REGS_IN msgrequest */
 #define    MC_CMD_READ_REGS_IN_LEN 0
 #define    MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define       MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
 /* The initial timer value. The load value is ignored if the timer mode is DIS.
  */
 #define       MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define       MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
 /* The reload value is ignored in one-shot modes */
 #define       MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define       MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
 /* tbd */
 #define       MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
 #define        MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
 #define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
 #define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
 #define       MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define       MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
 /* enum: Immediate */
 #define          MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
 /* Target EVQ for wakeups if in wakeup mode. */
 #define       MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define       MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
 /* Target interrupt if in interrupting mode (note union with target EVQ). Use
  * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
  * purposes.
  */
 #define       MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define       MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
 /* Event Counter Mode. */
 #define       MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define       MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
 /* Event queue packet count threshold. */
 #define       MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define       MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
 #define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
 #define    MC_CMD_INIT_EVQ_OUT_LEN 4
 /* Only valid if INTRFLAG was true */
 #define       MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define       MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
 
 /* MC_CMD_INIT_EVQ_V2_IN msgrequest */
 #define    MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
 #define    MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define       MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
 /* The initial timer value. The load value is ignored if the timer mode is DIS.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
 /* The reload value is ignored in one-shot modes */
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
 /* tbd */
 #define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
  */
 #define          MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
 /* enum: Immediate */
 #define          MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
 /* Target EVQ for wakeups if in wakeup mode. */
 #define       MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define       MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
 /* Target interrupt if in interrupting mode (note union with target EVQ). Use
  * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
  * purposes.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define       MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
 /* Event Counter Mode. */
 #define       MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
 /* Event queue packet count threshold. */
 #define       MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
 #define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
 #define    MC_CMD_INIT_EVQ_V2_OUT_LEN 8
 /* Only valid if INTRFLAG was true */
 #define       MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define       MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
 /* Actual configuration applied on the card */
 #define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
 #define    MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to INIT_EVQ
  */
 #define       MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
 #define        MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
 #define    MC_CMD_INIT_RXQ_EXT_IN_LEN 544
 /* Size, in entries */
 #define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to INIT_EVQ
  */
 #define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
 /* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
 #define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
 
 /* MC_CMD_INIT_RXQ_OUT msgresponse */
 #define    MC_CMD_INIT_RXQ_OUT_LEN 0
 #define    MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to
  * INIT_EVQ.
  */
 #define       MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
 #define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
 #define    MC_CMD_INIT_TXQ_EXT_IN_LEN 544
 /* Size, in entries */
 #define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to
  * INIT_EVQ.
  */
 #define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
 /* Flags related to Qbb flow control mode. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
  * passed to INIT_EVQ
  */
 #define       MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_EVQ_OUT msgresponse */
 #define    MC_CMD_FINI_EVQ_OUT_LEN 0
 #define    MC_CMD_FINI_RXQ_IN_LEN 4
 /* Instance of RXQ to destroy */
 #define       MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_RXQ_OUT msgresponse */
 #define    MC_CMD_FINI_RXQ_OUT_LEN 0
 #define    MC_CMD_FINI_TXQ_IN_LEN 4
 /* Instance of TXQ to destroy */
 #define       MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_TXQ_OUT msgresponse */
 #define    MC_CMD_FINI_TXQ_OUT_LEN 0
 #define    MC_CMD_DRIVER_EVENT_IN_LEN 12
 /* Handle of target EVQ */
 #define       MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define       MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
 /* Bits 0 - 63 of event */
 #define       MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
 #define       MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
 #define    MC_CMD_PROXY_CMD_IN_LEN 4
 /* The handle of the target function. */
 #define       MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define       MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
 #define        MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
 #define        MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
 #define        MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
 #define    MC_PROXY_STATUS_BUFFER_LEN 16
 /* Handle allocated by the firmware for this proxy transaction */
 #define       MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
 /* enum: An invalid handle. */
 #define          MC_PROXY_STATUS_BUFFER_HANDLE_INVALID  0x0
 #define       MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
  * elevated privilege mask granted to the requesting function.
  */
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
 
 /* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
 #define    MC_CMD_PROXY_CONFIGURE_IN_LEN 108
 #define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
 #define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
 #define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 #define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size REPLY_BLOCK_SIZE.
  */
 #define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
  * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
 #define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
 /* Must be a power of 2, or zero if this buffer is not provided */
 #define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
 /* Applies to all three buffers */
 #define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
 /* A bit mask defining which MCDI operations may be proxied */
 #define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
 #define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
 /* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
 #define    MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
 #define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size REPLY_BLOCK_SIZE.
  */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
  * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
 /* Must be a power of 2, or zero if this buffer is not provided */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
 /* Applies to all three buffers */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
 /* A bit mask defining which MCDI operations may be proxied */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
 
 /* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
 #define    MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
 /* MC_CMD_PROXY_COMPLETE_IN msgrequest */
 #define    MC_CMD_PROXY_COMPLETE_IN_LEN 12
 #define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
 #define       MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+#define       MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
 /* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
  * is stored in the REPLY_BUFF.
  */
  */
 #define          MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
 #define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+#define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
 
 /* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
 #define    MC_CMD_PROXY_COMPLETE_OUT_LEN 0
 #define    MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
 /* Owner ID to use */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
 /* Size of buffer table pages to use, in bytes (note that only a few values are
  * legal on any specific hardware).
  */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
 
 /* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
 /* Buffer table IDs for use in DMA descriptors. */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
 /* ID */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
 /* Num entries */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
 /* Buffer table entry address */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
 /* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
 #define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
 
 /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
 
-/* PORT_CONFIG_ENTRY structuredef */
-#define    PORT_CONFIG_ENTRY_LEN 16
-/* External port number (label) */
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
-/* Port core location */
-#define       PORT_CONFIG_ENTRY_CORE_OFST 1
-#define       PORT_CONFIG_ENTRY_CORE_LEN 1
-#define          PORT_CONFIG_ENTRY_STANDALONE  0x0 /* enum */
-#define          PORT_CONFIG_ENTRY_MASTER  0x1 /* enum */
-#define          PORT_CONFIG_ENTRY_SLAVE  0x2 /* enum */
-#define       PORT_CONFIG_ENTRY_CORE_LBN 8
-#define       PORT_CONFIG_ENTRY_CORE_WIDTH 8
-/* Internal number (HW resource) relative to the core */
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
-/* Reserved */
-#define       PORT_CONFIG_ENTRY_RSVD_OFST 3
-#define       PORT_CONFIG_ENTRY_RSVD_LEN 1
-#define       PORT_CONFIG_ENTRY_RSVD_LBN 24
-#define       PORT_CONFIG_ENTRY_RSVD_WIDTH 8
-/* Bitmask of KR lanes used by the port */
-#define       PORT_CONFIG_ENTRY_LANES_OFST 4
-#define       PORT_CONFIG_ENTRY_LANES_LBN 32
-#define       PORT_CONFIG_ENTRY_LANES_WIDTH 32
-/* Port capabilities (MC_CMD_PHY_CAP_*) */
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
-/* Reserved (align to 16 bytes) */
-#define       PORT_CONFIG_ENTRY_RSVD2_OFST 12
-#define       PORT_CONFIG_ENTRY_RSVD2_LBN 96
-#define       PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
-
 
 /***********************************/
 /* MC_CMD_FILTER_OP
 #define    MC_CMD_FILTER_OP_IN_LEN 108
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define       MC_CMD_FILTER_OP_IN_OP_LEN 4
 /* enum: single-recipient filter insert */
 #define          MC_CMD_FILTER_OP_IN_OP_INSERT  0x0
 /* enum: single-recipient filter remove */
 /* The port ID associated with the v-adaptor which should contain this filter.
  */
 #define       MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define       MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
 /* fields to include in match criteria */
 #define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
 #define        MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
 #define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
 /* receive destination */
 #define       MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define       MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
 /* enum: drop packets */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_DROP  0x0
 /* enum: receive to host */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define       MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define       MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
  * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
 /* transmit domain (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define       MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
 /* transmit destination (either set the MAC and/or PM bits for explicit
  * control, or set this field to TX_DEST_DEFAULT for sensible default
  * behaviour)
  */
 #define       MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define       MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
 /* enum: request default behaviour (based on filter type) */
 #define          MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT  0xffffffff
 #define        MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
 #define       MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
 /* Firmware defined register 0 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define       MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
 /* Firmware defined register 1 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define       MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
 /* source IP address to match (as bytes in network order; set last 12 bytes to
  * 0 for IPv4 address)
  */
 #define    MC_CMD_FILTER_OP_EXT_IN_LEN 172
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define       MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_IN/OP */
 /* filter handle (for remove / unsubscribe operations) */
 /* The port ID associated with the v-adaptor which should contain this filter.
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
 /* fields to include in match criteria */
 #define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
 /* receive destination */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
 /* enum: drop packets */
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP  0x0
 /* enum: receive to host */
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
  * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
 /* transmit domain (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
 /* transmit destination (either set the MAC and/or PM bits for explicit
  * control, or set this field to TX_DEST_DEFAULT for sensible default
  * behaviour)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
 /* enum: request default behaviour (based on filter type) */
 #define          MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT  0xffffffff
 #define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
 #define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
 /* Firmware defined register 0 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
 /* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
  * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
  * VXLAN/NVGRE, or 1 for Geneve)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
  * to 0)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
 /* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
  * to 0)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
 /* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
  * order; set last 12 bytes to 0 for IPv4 address)
  */
 #define    MC_CMD_FILTER_OP_OUT_LEN 12
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define       MC_CMD_FILTER_OP_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_IN/OP */
 /* Returned filter handle (for insert / subscribe operations). Note that these
 #define    MC_CMD_FILTER_OP_EXT_OUT_LEN 12
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define       MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_EXT_IN/OP */
 /* Returned filter handle (for insert / subscribe operations). Note that these
 #define    MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
 /* enum: read the list of supported RX filter matches */
 #define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES  0x1
 /* enum: read flags indicating restrictions on filter insertion for the calling
 #define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
 /* number of supported match types */
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
 /* array of supported match types (valid MATCH_FIELDS values for
  * MC_CMD_FILTER_OP) sorted in decreasing priority order
  */
 #define    MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
 /* bitfield of filter insertion restrictions */
 #define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
 #define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
 #define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
 
 #define    MC_CMD_PARSER_DISP_RW_IN_LEN 32
 /* identifies the target of the operation */
 #define       MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
 /* enum: RX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_RX_DICPU  0x0
 /* enum: TX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_TX_DICPU  0x1
-/* enum: Lookup engine (with original metadata format) */
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE  0x2
 /* enum: Lookup engine (with requested metadata format) */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA  0x3
 #define          MC_CMD_PARSER_DISP_RW_IN_MISC_STATE  0x5
 /* identifies the type of operation requested */
 #define       MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-/* enum: read a word of DICPU DMEM or a LUE entry */
+#define       MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry */
 #define          MC_CMD_PARSER_DISP_RW_IN_READ  0x0
-/* enum: write a word of DICPU DMEM or a LUE entry */
+/* enum: Write a word of DICPU DMEM or a LUE entry. */
 #define          MC_CMD_PARSER_DISP_RW_IN_WRITE  0x1
-/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). */
 #define          MC_CMD_PARSER_DISP_RW_IN_RMW  0x2
 /* data memory address (DICPU targets) or LUE index (LUE targets) */
 #define       MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define       MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
 /* selector (for MISC_STATE target) */
 #define       MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define       MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
 /* enum: Port to datapath mapping */
 #define          MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING  0x1
 /* value to write (for DMEM writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
 /* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
 /* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
 /* value to write (for LUE writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
 #define    MC_CMD_PARSER_DISP_RW_OUT_LEN 52
 /* value read (for DMEM reads) */
 #define       MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
 /* value read (for LUE reads) */
 #define       MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
 #define       MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
 #define    MC_CMD_SET_PF_COUNT_IN_LEN 4
 /* New number of PFs on the device. */
 #define       MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define       MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
 
 /* MC_CMD_SET_PF_COUNT_OUT msgresponse */
 #define    MC_CMD_SET_PF_COUNT_OUT_LEN 0
 #define    MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
 /* Identifies the port assignment for this function. */
 #define       MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define       MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
 /* Identifies the port assignment for this function. */
 #define       MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define       MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
 
 /* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
 #define    MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
 #define    MC_CMD_ALLOC_VIS_IN_LEN 8
 /* The minimum number of VIs that is acceptable */
 #define       MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
 /* The maximum number of VIs that would be useful */
 #define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
 
 /* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
  * Use extended version in new code.
 #define    MC_CMD_ALLOC_VIS_OUT_LEN 8
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
 
 /* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
 #define    MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
 /* Function's port vi_shift value (always 0 on Huntington) */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
 /* Number of VFs currently enabled. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
 /* Max number of VFs before sriov stride and offset may need to be changed. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
 #define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
 #define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
 /* RID offset of first VF from PF. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
 /* RID offset of each subsequent VF from the previous. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_SET_SRIOV_CFG_IN_LEN 20
 /* Number of VFs currently enabled. */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
 /* Max number of VFs before sriov stride and offset may need to be changed. */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
 #define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
 #define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
 /* RID offset of first VF from PF, or 0 for no change, or
  * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
  */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
 /* RID offset of each subsequent VF from the previous, 0 for no change, or
  * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
  */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
 
 /* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
 #define    MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
 #define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
 /* Function's port vi_shift value (always 0 on Huntington) */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_DUMP_VI_STATE_IN_LEN 4
 /* The VI number to query. */
 #define       MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define       MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
 
 /* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
 #define    MC_CMD_DUMP_VI_STATE_OUT_LEN 96
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
 /* Combined metadata field. */
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
 #define    MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_FREE_PIOBUF_IN_LEN 4
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
 
 /* MC_CMD_FREE_PIOBUF_OUT msgresponse */
 #define    MC_CMD_FREE_PIOBUF_OUT_LEN 0
 #define    MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
 /* VI number to get information for. */
 #define       MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define       MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
 
 /* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
 #define    MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
 /* VI number to set information for. */
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
 /* Transaction processing steering hint 1 for use with the Rx Queue. */
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
 
 /* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
 #define    MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
 /* enum: MISC. */
 #define          MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC  0x0
 /* enum: IDO. */
 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
 #define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
 /* Amalgamated TLP info word. */
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
 /* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
 /* Amalgamated TLP info word. */
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
  * in a command from the host.)
  */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE     0x0 /* enum */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET    0x1 /* enum */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS    0x2 /* enum */
  * mc_flash_layout.h.)
  */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
 /* enum: Valid in phase 2 (PHASE_IMEMS) only */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT  0x0
 /* enum: Valid in phase 2 (PHASE_IMEMS) only */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL  0xffffffff
 /* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
 /* enum: Last chunk, containing checksum rather than data */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST  0xffffffff
 /* enum: Abort download of this item */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT  0xfffffffe
 /* Length of this chunk in bytes */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
 /* Data for this chunk */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
 #define    MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
 /* Same as MC_CMD_ERR field, but included as 0 in success cases */
 #define       MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
 /* Extra status information */
 #define       MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
 /* enum: Code download OK, completed. */
 #define          MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE  0x0
 /* enum: Code download aborted as requested. */
 #define    MC_CMD_GET_CAPABILITIES_OUT_LEN 20
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
 
 /* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
 #define    MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
 #define    MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
 /* Second word of flags. Not present on older firmware (check the length). */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
  * on older firmware (check the length).
  */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
 
 /* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
-#define    MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73
+#define    MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
 /* Second word of flags. Not present on older firmware (check the length). */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
  * on older firmware (check the length).
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K  0x1
 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K  0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP  0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY  0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE  0x5
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST  0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH  0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD  0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST  0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE  0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE  0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS  0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT  0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW  0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP  0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY  0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE  0x5
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST  0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT  0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS  0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR  0x103
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM  0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY  0xf
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED  0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT  0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED  0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT  0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED  0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT  0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K   0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K  0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K  0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
 
 
 /***********************************/
 #define    MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
 
 /* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
 #define    MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
 /* the rate in mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+#define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
 
 /* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
 #define    MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
 /* the rate in mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
 /* the desired maximum fill level */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
 
 /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
 #define    MC_CMD_TCM_TXQ_INIT_IN_LEN 28
 /* the txq id */
 #define       MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define       MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
 /* bitmask of the priority queues this txq is inserted into when inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
 /* an already reserved bucket (typically set to bucket associated with outer
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
 /* an already reserved bucket (typically set to bucket associated with inner
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+#define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
 
 /* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
 #define    MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
 /* the txq id */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
 /* bitmask of the priority queues this txq is inserted into when inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
 /* an already reserved bucket (typically set to bucket associated with outer
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
 /* an already reserved bucket (typically set to bucket associated with inner
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
 
 /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
 #define    MC_CMD_LINK_PIOBUF_IN_LEN 8
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
 /* Function Local Instance (VI) number. */
 #define       MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define       MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
 
 /* MC_CMD_LINK_PIOBUF_OUT msgresponse */
 #define    MC_CMD_LINK_PIOBUF_OUT_LEN 0
 #define    MC_CMD_UNLINK_PIOBUF_IN_LEN 4
 /* Function Local Instance (VI) number. */
 #define       MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define       MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
 
 /* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
 #define    MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
 #define    MC_CMD_VSWITCH_ALLOC_IN_LEN 16
 /* The port to connect to the v-switch's upstream port. */
 #define       MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of v-switch to create. */
 #define       MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
 /* enum: VLAN */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN  0x1
 /* enum: VEB */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST  0x5
 /* Flags controlling v-port creation */
 #define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
 /* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
  * v-ports with this number of tags.
  */
 #define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 
 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
 #define    MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
 #define    MC_CMD_VSWITCH_FREE_IN_LEN 4
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VSWITCH_FREE_OUT msgresponse */
 #define    MC_CMD_VSWITCH_FREE_OUT_LEN 0
 #define    MC_CMD_VSWITCH_QUERY_IN_LEN 4
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
 #define    MC_CMD_VSWITCH_QUERY_OUT_LEN 0
 #define    MC_CMD_VPORT_ALLOC_IN_LEN 20
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of the new v-port. */
 #define       MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
 /* enum: VLAN (obsolete) */
 #define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN  0x1
 /* enum: VEB (obsolete) */
 #define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST  0x6
 /* Flags controlling v-port creation */
 #define       MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
  * v-switch.
  */
 #define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
 #define    MC_CMD_VPORT_ALLOC_OUT_LEN 4
 /* The handle of the new v-port */
 #define       MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_VPORT_FREE_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
 
 /* MC_CMD_VPORT_FREE_OUT msgresponse */
 #define    MC_CMD_VPORT_FREE_OUT_LEN 0
 #define    MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
 /* The port to connect to the v-adaptor's port. */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* Flags controlling v-adaptor creation */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
 /* The number of VLAN tags to strip on receive */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
 /* The number of VLAN tags to transparently insert/remove. */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
 #define    MC_CMD_VADAPTOR_FREE_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_FREE_OUT_LEN 0
 #define    MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The new MAC address to assign to this v-adaptor */
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
 #define    MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
 #define    MC_CMD_VADAPTOR_QUERY_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define       MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
 /* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define       MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
 /* The number of VLAN tags that may still be added */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define       MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
 /* The port to assign. */
 #define       MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
 /* The target function to modify. */
 #define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
 /* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
 #define    MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
 /* Write enable bits 0-3, set to write, clear to read. */
 #define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
 #define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
  */
 #define    MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
 /* The handle of the owning upstream port */
 #define       MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
 #define    MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
 /* The handle of the new Onload stack */
 #define       MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
 /* The handle of the Onload stack */
 #define       MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
 
 /* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
 #define    MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
 #define    MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
 /* The handle of the owning upstream port */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of context to allocate */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
 /* enum: Allocate a context for exclusive use. The key and indirection table
  * must be explicitly configured.
  */
  * in the indirection table will be in the range 0 to NUM_QUEUES-1.
  */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
  * handle.
  */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
 /* enum: guaranteed invalid RSS context handle value */
 #define          MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID  0xffffffff
 
 #define    MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
 #define    MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
 /* The 40-byte Toeplitz hash key (TBD endianness issues?) */
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
 #define    MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
 #define    MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
 /* The 128-byte indirection table (1 byte per entry) */
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
 #define    MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
 /* Hash control flags. The _EN bits are always supported, but new modes are
  * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
  * in this case, the MODE fields may be set to non-zero values, and will take
  * particular packet type.)
  */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
  * always be used for a SET regardless of old/new driver vs. old/new firmware.
  */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
 /* The handle of the owning upstream port */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* Number of queues spanned by this mapping, in the range 1-64; valid fixed
  * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
  * referenced RSS contexts must span no more than this number.
  */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
  * handle.
  */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
 /* enum: guaranteed invalid .1p mapping handle value */
 #define          MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID  0xffffffff
 
 #define    MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
 #define    MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
 /* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
  * handle)
  */
 #define    MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
 #define    MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
 /* Base absolute interrupt vector number. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
 /* Number of interrupt vectors allocate to this PF. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
 /* Number of interrupt vectors to allocate per VF. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
 
 
 /***********************************/
  * let the system find a suitable base.
  */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define       MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
 /* Number of interrupt vectors allocate to this PF. */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
 /* Number of interrupt vectors to allocate per VF. */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
 
 /* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
 #define    MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
 #define    MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
 /* MAC address to add */
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
 #define    MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
 /* MAC address to remove */
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
 
 /* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
 /* The number of MAC addresses returned */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
 /* Array of MAC addresses */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
 #define    MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
 /* Flags requesting what should be changed. */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
  * v-switch.
  */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
 /* The number of MAC addresses to add */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
 /* MAC addresses to add */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
 #define       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
 /* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
 #define    MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
 #define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
 
 #define    MC_CMD_EVB_PORT_QUERY_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define       MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
 
 /* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
 #define    MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
 #define       MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define       MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
 /* The number of VLAN tags that may be used on a v-adaptor connected to this
  * EVB port.
  */
 #define       MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define       MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
 
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
 /* Index of the first buffer table entry. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
 /* Number of buffer table entries to dump. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
 
 /* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
 /* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
 #define    MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
 #define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
 /* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
 #define    MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
 #define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
 #define    MC_CMD_GET_CLOCK_OUT_LEN 8
 /* System frequency, MHz */
 #define       MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define       MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
 /* DPCPU frequency, MHz */
 #define       MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define       MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_SET_CLOCK 0xad
 
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_CLOCK_IN msgrequest */
 #define    MC_CMD_SET_CLOCK_IN_LEN 28
 /* Requested frequency in MHz for system clock domain */
 #define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+#define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
 /* enum: Leave the system clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for inter-core clock domain */
 #define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+#define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
 /* enum: Leave the inter-core clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for DPCPU clock domain */
 #define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+#define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
 /* enum: Leave the DPCPU clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for PCS clock domain */
 #define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+#define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
 /* enum: Leave the PCS clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for MC clock domain */
 #define       MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+#define       MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
 /* enum: Leave the MC clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for rmon clock domain */
 #define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+#define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
 /* enum: Leave the rmon clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for vswitch clock domain */
 #define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+#define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
 /* enum: Leave the vswitch clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE  0x0
 
 #define    MC_CMD_SET_CLOCK_OUT_LEN 28
 /* Resulting system frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
 /* enum: The system clock domain doesn't exist */
 #define          MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting inter-core frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+#define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
 /* enum: The inter-core clock domain doesn't exist / isn't used */
 #define          MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED  0x0
 /* Resulting DPCPU frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+#define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
 /* enum: The dpcpu clock domain doesn't exist */
 #define          MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED  0x0
 /* Resulting PCS frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+#define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
 /* enum: The PCS clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting MC frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+#define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
 /* enum: The MC clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED  0x0
 /* Resulting rmon frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+#define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
 /* enum: The rmon clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED  0x0
 /* Resulting vswitch frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+#define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
 /* enum: The vswitch clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED  0x0
 
  */
 #define MC_CMD_DPCPU_RPC 0xae
 
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DPCPU_RPC_IN msgrequest */
 #define    MC_CMD_DPCPU_RPC_IN_LEN 36
 #define       MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+#define       MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
 /* enum: RxDPCPU0 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX0  0x0
 /* enum: TxDPCPU0 */
 #define       MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
 /* Register data to write. Only valid in write/write-read. */
 #define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
 /* Register address. */
 #define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
 
 /* MC_CMD_DPCPU_RPC_OUT msgresponse */
 #define    MC_CMD_DPCPU_RPC_OUT_LEN 36
 #define       MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+#define       MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
 /* DATA */
 #define       MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
 #define       MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
 #define       MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
 #define       MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
 /* Interrupt level relative to base for function. */
 #define       MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define       MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
 
 /* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
 #define    MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
 #define    MC_CMD_SHMBOOT_OP_IN_LEN 4
 /* Identifies the operation to perform */
 #define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
 /* enum: Copy slave_data section to the slave core. (Greenport only) */
 #define          MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA  0x0
 
  */
 #define MC_CMD_CAP_BLK_READ 0xe7
 
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CAP_BLK_READ_IN msgrequest */
 #define    MC_CMD_CAP_BLK_READ_IN_LEN 12
 #define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
 #define       MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define       MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
 #define       MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+#define       MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
 
 /* MC_CMD_CAP_BLK_READ_OUT msgresponse */
 #define    MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
  */
 #define MC_CMD_DUMP_DO 0xe8
 
-#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_DO_IN msgrequest */
 #define    MC_CMD_DUMP_DO_IN_LEN 52
 #define       MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define       MC_CMD_DUMP_DO_IN_PADDING_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM  0x0 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT  0x1 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM  0x1 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY  0x2 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI  0x3 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART  0x4 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define          MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE  0x1000 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define          MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH  0x2 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
 /* enum: The uart port this command was received over (if using a uart
  * transport)
  */
 #define          MC_CMD_DUMP_DO_IN_UART_PORT_SRC  0xff
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM  0x0 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION  0x1 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
 
 /* MC_CMD_DUMP_DO_OUT msgresponse */
 #define    MC_CMD_DUMP_DO_OUT_LEN 4
 #define       MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+#define       MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
 
-#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
 #define    MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_SET_PSU 0xea
 
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_PSU_IN msgrequest */
 #define    MC_CMD_SET_PSU_IN_LEN 12
 #define       MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define       MC_CMD_SET_PSU_IN_PARAM_LEN 4
 #define          MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE  0x0 /* enum */
 #define       MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define       MC_CMD_SET_PSU_IN_RAIL_LEN 4
 #define          MC_CMD_SET_PSU_IN_RAIL_0V9  0x0 /* enum */
 #define          MC_CMD_SET_PSU_IN_RAIL_1V2  0x1 /* enum */
 /* desired value, eg voltage in mV */
 #define       MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define       MC_CMD_SET_PSU_IN_VALUE_LEN 4
 
 /* MC_CMD_SET_PSU_OUT msgresponse */
 #define    MC_CMD_SET_PSU_OUT_LEN 0
 /* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
 #define    MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
 #define       MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
 #define       MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
 /* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
 #define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+#define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
 /* Offset at which to write the data */
 #define       MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+#define       MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
 /* Length of data */
 #define       MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+#define       MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define       MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
 #define    MC_CMD_UART_RECV_DATA_OUT_LEN 16
 /* CRC32 over OFFSET, LENGTH, RESERVED */
 #define       MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define       MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
 /* Offset from which to read the data */
 #define       MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define       MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
 /* Length of data */
 #define       MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define       MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define       MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
 
 /* MC_CMD_UART_RECV_DATA_IN msgresponse */
 #define    MC_CMD_UART_RECV_DATA_IN_LENMIN 16
 #define    MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
 /* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
 #define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
  */
 #define MC_CMD_READ_FUSES 0xf0
 
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_FUSES_IN msgrequest */
 #define    MC_CMD_READ_FUSES_IN_LEN 8
 /* Offset in OTP to read */
 #define       MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define       MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
 /* Length of data to read in bytes */
 #define       MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define       MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
 
 /* MC_CMD_READ_FUSES_OUT msgresponse */
 #define    MC_CMD_READ_FUSES_OUT_LENMIN 4
 #define    MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
 /* Length of returned OTP data in bytes */
 #define       MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define       MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
 /* Returned data */
 #define       MC_CMD_READ_FUSES_OUT_DATA_OFST 4
 #define       MC_CMD_READ_FUSES_OUT_DATA_LEN 1
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC  0x9
 /* enum: CTLE EQ Resistor (0-7, Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES  0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN  0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE  0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK  0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN  0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD  0xf
+/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2  0x10
+/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3  0x11
+/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4  0x12
+/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5  0x13
+/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6  0x14
+/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7  0x15
+/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8  0x16
+/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9  0x17
+/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10  0x18
+/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11  0x19
+/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12  0x1a
+/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF  0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN  0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD  0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN  0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD  0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT  0x20
+/* enum: CDR integral loop code (Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG  0x21
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0  0x0 /* enum */
 #define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude (Huntington, Medford) */
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV  0x0
 /* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE  0x1
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET  0x9
 /* enum: TX Amplitude Fine control (Medford) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE  0xa
-/* enum: Pre-shoot Tap (Medford) */
+/* enum: Pre-shoot Tap (Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV  0xb
-/* enum: De-emphasis Tap (Medford) */
+/* enum: De-emphasis Tap (Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY  0xc
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 /* Align the arguments to 32 bits */
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */
+#define    MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+/* Scan duration / cycle count */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4
 
 /* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
 
 /* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
 #define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+#define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4
 
 
 /***********************************/
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
 
 /* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
 #define    MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
 #define    MC_CMD_LICENSING_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_LICENSING_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_IN_OP_LEN 4
 /* enum: re-read and apply licenses after a license key partition update; note
  * that this operation returns a zero-length response
  */
 #define    MC_CMD_LICENSING_OUT_LEN 28
 /* count of application keys which are valid */
 #define       MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define       MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
 /* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
  * MC_CMD_FC_OP_LICENSE)
  */
 #define       MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define       MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being blacklisted */
 #define       MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define       MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being unverifiable */
 #define       MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define       MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being for the wrong node
  */
 #define       MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define       MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
 /* licensing state (for diagnostics; the exact meaning of the bits in this
  * field are private to the firmware)
  */
 #define       MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define       MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
 /* licensing subsystem self-test report (for manftest) */
 #define       MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define       MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
 /* enum: licensing subsystem self-test failed */
 #define          MC_CMD_LICENSING_OUT_SELF_TEST_FAIL  0x0
 /* enum: licensing subsystem self-test passed */
 #define    MC_CMD_LICENSING_V3_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_IN_OP_LEN 4
 /* enum: re-read and apply licenses after a license key partition update; note
  * that this operation returns a zero-length response
  */
 #define    MC_CMD_LICENSING_V3_OUT_LEN 88
 /* count of keys which are valid */
 #define       MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define       MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
 /* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
  * MC_CMD_FC_OP_LICENSE)
  */
 #define       MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define       MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
 /* count of keys which are invalid due to being unverifiable */
 #define       MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define       MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
 /* count of keys which are invalid due to being for the wrong node */
 #define       MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define       MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
 /* licensing state (for diagnostics; the exact meaning of the bits in this
  * field are private to the firmware)
  */
 #define       MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
 /* licensing subsystem self-test report (for manftest) */
 #define       MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
 /* enum: licensing subsystem self-test failed */
 #define          MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL  0x0
 /* enum: licensing subsystem self-test passed */
 #define    MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
 /* type of license (eg 3) */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
 /* length of the license ID (in bytes) */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
 /* the unique license ID of the adapter */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
 #define    MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
 /* application ID to query (LICENSED_APP_ID_xxx) */
 #define       MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define       MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
 
 /* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
 #define    MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
 /* state of this application */
 #define       MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
 /* enum: no (or invalid) license is present for the application */
 #define          MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED  0x0
 /* enum: a valid license is present for the application */
 #define    MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
 /* state of this application */
 #define       MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
 /* enum: no (or invalid) license is present for the application */
 #define          MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED  0x0
 /* enum: a valid license is present for the application */
 #define    MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
 /* enum: validate application */
 #define          MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE  0x0
 /* enum: mask application */
 #define    MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
 /* validation challenge */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
 #define    MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
 /* feature expiry (time_t) */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
 /* validation response */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
 #define    MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
 /* flag */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
 
 /* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
 #define    MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
 /* application expiry time */
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
 /* application expiry units */
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
 /* enum: expiry units are accounting units */
 #define          MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC  0x0
 /* enum: expiry units are calendar days */
  */
 #define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
 
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
 #define    MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
 #define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
 /* whether to turn on or turn off the masked features */
 #define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
 /* enum: turn the features off */
 #define          MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF  0x0
 /* enum: turn the features back on */
  */
 #define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
 
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
 /* operation code */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
 /* enum: install a new license, overwriting any existing temporary license.
  * This is an asynchronous operation owing to the time taken to validate an
  * ECDSA license
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
 /* ECDSA license and signature */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
 /* status code */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
 /* enum: finished validating and installing license */
 #define          MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK  0x0
 /* enum: license validation and installation in progress */
 #define    MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
 /* configuration flags */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
 /* receive queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
  * of 0xFFFFFFFF is guaranteed never to be a valid handle.
  */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
 
 /* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
 #define    MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
 #define    MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
 /* configuration flags */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
 /* receiving queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
 /* enum: receiving to just the specified queue */
 #define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
 /* enum: receiving to multiple queues using RSS context */
 #define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
 /* RSS context (for RX_MODE_RSS) */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
 /* the type of configuration setting to change */
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
 /* enum: Per-TXQ enable for multicast UDP destination lookup for possible
  * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
  */
  * on the type of configuration setting being changed
  */
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
 /* new value: the details depend on the type of configuration setting being
  * changed
  */
 #define    MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
 /* the type of configuration setting to read */
 #define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
 /* handle for the entity to query: queue handle, EVB port ID, etc. depending on
  * the type of configuration setting being read
  */
 #define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
 
 /* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
 #define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
 #define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
 /* configuration flags */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
 #define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
 /* receive queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
  * of 0xFFFFFFFF is guaranteed never to be a valid handle.
  */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
 
 /* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
 #define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
 #define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
 /* configuration flags */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
 #define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
 /* receiving queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
 /* enum: receiving to just the specified queue */
 #define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
 /* enum: receiving to multiple queues using RSS context */
 #define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
 /* RSS context (for RX_MODE_RSS) */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
 /* The rx queue to get stats for. */
 #define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
 #define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
 #define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
 
 /* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
 #define    MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
 
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
 #define    MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
 
 #define    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
 /* The maximum number of PFs the device can expose */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
 /* The maximum number of VFs the device can expose in total */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
 /* The maximum number of MSI-X vectors the device can provide in total */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
 /* the number of MSI-X vectors the device will allocate by default to each PF
  */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
 /* the number of MSI-X vectors the device will allocate by default to each VF
  */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
 /* the maximum number of MSI-X vectors the device can allocate to any one PF */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
 /* the maximum number of MSI-X vectors the device can allocate to any one VF */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_GET_PORT_MODES_OUT_LEN 12
 /* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
 #define       MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define       MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
 /* Default (canonical) board mode */
 #define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
 /* Current board mode */
 #define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_READ_ATB 0x100
 
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_ATB_IN msgrequest */
 #define    MC_CMD_READ_ATB_IN_LEN 16
 #define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
 #define          MC_CMD_READ_ATB_IN_BUS_CCOM  0x0 /* enum */
 #define          MC_CMD_READ_ATB_IN_BUS_CKR  0x1 /* enum */
 #define          MC_CMD_READ_ATB_IN_BUS_CPCIE  0x8 /* enum */
 #define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
 #define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
 #define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
 
 /* MC_CMD_READ_ATB_OUT msgresponse */
 #define    MC_CMD_READ_ATB_OUT_LEN 4
 #define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
 
 
 /***********************************/
 /* Each workaround is represented by a single bit according to the enums below.
  */
 #define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
 #define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
 /* enum: Bug 35388 work around (unsafe EVQ writes). */
  * 1,3 = 0x00030001
  */
 #define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
  * set to 1.
  */
 #define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN             0x1 /* enum */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK              0x2 /* enum */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD            0x4 /* enum */
  * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
  */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN  0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE          0x4000
 /* enum: Set this bit to indicate that a new privilege mask is to be set,
  * otherwise the command will only read the existing mask.
  */
 #define    MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
 /* For an admin function, always all the privileges are reported. */
 #define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
 
 
 /***********************************/
  * e.g. VF 1,3 = 0x00030001
  */
 #define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
 /* New link state mode to be set */
 #define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
 /* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
 #define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
 #define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
 
 
 /***********************************/
 /* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
  * parameter to MC_CMD_INIT_RXQ.
  */
 #define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
 #define    MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
 /* Minimum acceptable snapshot length. */
 #define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
 /* Maximum acceptable snapshot length. */
 #define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_FUSE_DIAGS 0x102
 
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_FUSE_DIAGS_IN msgrequest */
 #define    MC_CMD_FUSE_DIAGS_IN_LEN 0
 #define    MC_CMD_FUSE_DIAGS_OUT_LEN 48
 /* Total number of mismatched bits between pairs in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
 /* Total number of mismatched bits between pairs in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
 /* Total number of mismatched bits between pairs in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
 /* The groups of functions to have their privilege masks modified. */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_NONE       0x0 /* enum */
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_ALL        0x1 /* enum */
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY   0x2 /* enum */
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_ONE        0x5 /* enum */
 /* For VFS_OF_PF specify the PF, for ONE specify the target function */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
  * refer to the command MC_CMD_PRIVILEGE_MASK
  */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
 /* Privileges to be removed from the target functions. For privilege
  * definitions refer to the command MC_CMD_PRIVILEGE_MASK
  */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
 
 /* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
 #define    MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
 #define    MC_CMD_XPM_READ_BYTES_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
 #define    MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
  */
 #define MC_CMD_XPM_WRITE_BYTES 0x104
 
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
 #define    MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
 /* Start address (byte) */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
 /* Data */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
 #define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
  */
 #define MC_CMD_XPM_READ_SECTOR 0x105
 
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_READ_SECTOR_IN_LEN 8
 /* Sector index */
 #define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
 /* Sector size */
 #define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
 
 /* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
 #define    MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
 #define    MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
 /* Sector type */
 #define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
 #define          MC_CMD_XPM_READ_SECTOR_OUT_BLANK            0x0 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128   0x1 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256   0x2 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA      0x3 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_INVALID          0xff /* enum */
 /* Sector data */
 #define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
  */
 #define MC_CMD_XPM_WRITE_SECTOR 0x106
 
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
 /* Sector type */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
 /* Sector size */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
 /* Sector data */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
 #define    MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
 /* New sector index */
 #define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
 
 
 /***********************************/
  */
 #define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
 
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
 /* Sector index */
 #define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
 
 /* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
 #define    MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
  */
 #define MC_CMD_XPM_BLANK_CHECK 0x108
 
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
 #define    MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
 #define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
 #define    MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
 /* Total number of bad (non-blank) locations */
 #define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
 /* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
  * into MCDI response)
  */
  */
 #define MC_CMD_XPM_REPAIR 0x109
 
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_REPAIR_IN msgrequest */
 #define    MC_CMD_XPM_REPAIR_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_REPAIR_OUT msgresponse */
 #define    MC_CMD_XPM_REPAIR_OUT_LEN 0
  */
 #define MC_CMD_XPM_DECODER_TEST 0x10a
 
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
 #define    MC_CMD_XPM_DECODER_TEST_IN_LEN 0
  */
 #define MC_CMD_XPM_WRITE_TEST 0x10b
 
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_TEST_IN_LEN 0
 #define    MC_CMD_EXEC_SIGNED_IN_LEN 28
 /* the length of code to include in the CMAC */
 #define       MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define       MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
 /* the length of date to include in the CMAC */
 #define       MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define       MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
 /* the XPM sector containing the key to use */
 #define       MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define       MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
 /* the expected CMAC value */
 #define       MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
 #define       MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
 #define    MC_CMD_PREPARE_SIGNED_IN_LEN 4
 /* the length of data area to clear */
 #define       MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define       MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
 
 /* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
 #define    MC_CMD_PREPARE_SIGNED_OUT_LEN 0
 
 
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define    TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT  0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT  0x17c1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN  0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE  0x1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
 /***********************************/
 /* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
  * Configure UDP ports for tunnel encapsulation hardware acceleration. The
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
 
-/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
-#define    TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
-/* UDP port (the standard ports are named below but any port may be used) */
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
-/* enum: the IANA allocated UDP port for VXLAN */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT  0x12b5
-/* enum: the IANA allocated UDP port for Geneve */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT  0x17c1
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
-/* tunnel encapsulation protocol (only those named below are supported) */
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
-/* enum: VXLAN */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN  0x0
-/* enum: Geneve */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE  0x1
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
-
 
 /***********************************/
 /* MC_CMD_RX_BALANCING
 #define    MC_CMD_RX_BALANCING_IN_LEN 16
 /* The RX port whose upconverter table will be modified */
 #define       MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define       MC_CMD_RX_BALANCING_IN_PORT_LEN 4
 /* The VLAN priority associated to the table index and vFIFO */
 #define       MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+#define       MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
 /* The resulting bit of SRC^DST for indexing the table */
 #define       MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+#define       MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
 /* The RX engine to which the vFIFO in the table entry will point to */
 #define       MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+#define       MC_CMD_RX_BALANCING_IN_ENG_LEN 4
 
 /* MC_CMD_RX_BALANCING_OUT msgresponse */
 #define    MC_CMD_RX_BALANCING_OUT_LEN 0
 #define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
 /* The tag to be appended */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
 /* The length of the data */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
 /* The data to be contained in the TLV structure */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
 #define    MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
 /* Data type to be checked */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+#define       MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
 
 /* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
 #define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
 #define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
 /* Number of sectors found (test builds only) */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
 /* Number of bytes found (test builds only) */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
 /* Length of signature */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
 /* Signature */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
 #define    MC_CMD_SET_EVQ_TMR_IN_LEN 16
 /* Function-relative queue instance */
 #define       MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+#define       MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4
 /* Requested value for timer load (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4
 /* Requested value for timer reload (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4
 /* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS  0x0 /* enum */
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START  0x1 /* enum */
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START  0x2 /* enum */
 #define    MC_CMD_SET_EVQ_TMR_OUT_LEN 8
 /* Actual value for timer load (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4
 /* Actual value for timer reload (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
 /* Reserved for future use. */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
  * nanoseconds) for each increment of the timer load/reload count. The
  * requested duration of a timer is this value multiplied by the timer
  * load/reload count.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
  * allowed for timer load/reload counts.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
  * multiple of this step size will be rounded in an implementation defined
  * manner.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4
 /* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
  * meaningful if MC_CMD_SET_EVQ_TMR is implemented.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4
 /* Timer durations requested via MCDI that are not a multiple of this step size
  * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4
 /* For timers updated using the bug35388 workaround, this is the time interval
  * (in nanoseconds) for each increment of the timer load/reload count. The
  * requested duration of a timer is this value multiplied by the timer
  * is enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4
 /* For timers updated using the bug35388 workaround, this is the maximum value
  * allowed for timer load/reload counts. This field is only meaningful if the
  * bug35388 workaround is enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4
 /* For timers updated using the bug35388 workaround, timer load/reload counts
  * not a multiple of this step size will be rounded in an implementation
  * defined manner. This field is only meaningful if the bug35388 workaround is
  * enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
 
 
 /***********************************/
  * local queue index.
  */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
 /* Will the common pool be used as TX_vFIFO_ULL (1) */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED       0x1 /* enum */
 /* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED      0x0
 /* Number of buffers to reserve for the common pool */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
 /* TX datapath to which the Common Pool is connected to. */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
 /* enum: Extracts information from function */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE          -0x1
 /* Network port or RX Engine to which the common pool connects. */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
 /* enum: Extracts information from function */
 /*               MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE          -0x1 */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0          0x0 /* enum */
 #define    MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
 /* ID of the common pool allocated */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
 
 
 /***********************************/
 /* Common pool previously allocated to which the new vFIFO will be associated
  */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
 /* Port or RX engine to associate the vFIFO egress */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
 /* enum: Extracts information from common pool */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE   -0x1
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0          0x0 /* enum */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1     0x5
 /* Minimum number of buffers that the pool must have */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
 /* enum: Do not check the space available */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM     0x0
 /* Will the vFIFO be used as TX_vFIFO_ULL */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
 /* Network priority of the vFIFO,if applicable */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
 /* enum: Search for the lowest unused priority */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE  -0x1
 
 #define    MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
 /* Short vFIFO ID */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
 /* Network priority of the vFIFO */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
 
 
 /***********************************/
 #define    MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
 /* Short vFIFO ID */
 #define       MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+#define       MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
 
 /* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
 #define    MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
 #define    MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
 /* Common pool ID given when pool allocated */
 #define       MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+#define       MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
 
 /* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
 #define    MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
 #define    MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
 /* Available buffers for the ENG to NET vFIFOs. */
 #define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
 /* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
 #define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
 
 
 #endif /* MCDI_PCOL_H */
index 6e1f282..65ee1a4 100644 (file)
@@ -1087,7 +1087,7 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
        int period = action == EFX_STATS_ENABLE ? 1000 : 0;
        dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
        u32 dma_len = action != EFX_STATS_DISABLE ?
-               MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
+               efx->num_mac_stats * sizeof(u64) : 0;
 
        BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
 
@@ -1121,7 +1121,7 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
 {
        __le64 *dma_stats = efx->stats_buffer.addr;
 
-       dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+       dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
 
        efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
 }
@@ -1139,10 +1139,10 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
        __le64 *dma_stats = efx->stats_buffer.addr;
        int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
 
-       dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+       dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
        efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
 
-       while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+       while (dma_stats[efx->num_mac_stats - 1] ==
                                EFX_MC_STATS_GENERATION_INVALID &&
                        attempts-- != 0)
                udelay(EFX_MAC_STATS_WAIT_US);
@@ -1167,7 +1167,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
 
        /* Allocate buffer for stats */
        rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
-                                 MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+                                 efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
        if (rc)
                return rc;
        netif_dbg(efx, probe, efx->net_dev,
index 6b8730a..4cedc5c 100644 (file)
@@ -774,6 +774,8 @@ struct vfdi_status;
  * @port_initialized: Port initialized?
  * @net_dev: Operating system network device. Consider holding the rtnl lock
  * @fixed_features: Features which cannot be turned off
+ * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS
+ *     field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS)
  * @stats_buffer: DMA buffer for statistics
  * @phy_type: PHY type
  * @phy_op: PHY interface
@@ -922,6 +924,7 @@ struct efx_nic {
 
        netdev_features_t fixed_features;
 
+       u16 num_mac_stats;
        struct efx_buffer stats_buffer;
        u64 rx_nodesc_drops_total;
        u64 rx_nodesc_drops_while_down;
index 7b51b63..7630522 100644 (file)
@@ -325,6 +325,30 @@ enum {
        EF10_STAT_tx_bad,
        EF10_STAT_tx_bad_bytes,
        EF10_STAT_tx_overflow,
+       EF10_STAT_V1_COUNT,
+       EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT,
+       EF10_STAT_fec_corrected_errors,
+       EF10_STAT_fec_corrected_symbols_lane0,
+       EF10_STAT_fec_corrected_symbols_lane1,
+       EF10_STAT_fec_corrected_symbols_lane2,
+       EF10_STAT_fec_corrected_symbols_lane3,
+       EF10_STAT_ctpio_dmabuf_start,
+       EF10_STAT_ctpio_vi_busy_fallback,
+       EF10_STAT_ctpio_long_write_success,
+       EF10_STAT_ctpio_missing_dbell_fail,
+       EF10_STAT_ctpio_overflow_fail,
+       EF10_STAT_ctpio_underflow_fail,
+       EF10_STAT_ctpio_timeout_fail,
+       EF10_STAT_ctpio_noncontig_wr_fail,
+       EF10_STAT_ctpio_frm_clobber_fail,
+       EF10_STAT_ctpio_invalid_wr_fail,
+       EF10_STAT_ctpio_vi_clobber_fallback,
+       EF10_STAT_ctpio_unqualified_fallback,
+       EF10_STAT_ctpio_runt_fallback,
+       EF10_STAT_ctpio_success,
+       EF10_STAT_ctpio_fallback,
+       EF10_STAT_ctpio_poison,
+       EF10_STAT_ctpio_erase,
        EF10_STAT_COUNT
 };
 
index 22d49eb..ae8645a 100644 (file)
@@ -555,7 +555,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
 
        dma_stats = efx->stats_buffer.addr;
 
-       generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+       generation_end = dma_stats[efx->num_mac_stats - 1];
        if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
                return 0;
        rmb();
diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
new file mode 100644 (file)
index 0000000..3a1829e
--- /dev/null
@@ -0,0 +1,22 @@
+config NET_VENDOR_SOCIONEXT
+       bool "Socionext ethernet drivers"
+       default y
+       ---help---
+         Option to select ethernet drivers for Socionext platforms.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about Socionext devices. If you say Y, you will be asked
+         for your specific card in the following questions.
+
+if NET_VENDOR_SOCIONEXT
+
+config SNI_AVE
+       tristate "Socionext AVE ethernet support"
+       depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+       select PHYLIB
+       ---help---
+         Driver for gigabit ethernet MACs, called AVE, in the
+         Socionext UniPhier family.
+
+endif #NET_VENDOR_SOCIONEXT
diff --git a/drivers/net/ethernet/socionext/Makefile b/drivers/net/ethernet/socionext/Makefile
new file mode 100644 (file)
index 0000000..ab83df6
--- /dev/null
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for all ethernet ip drivers on Socionext platforms
+#
+obj-$(CONFIG_SNI_AVE) += sni_ave.o
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
new file mode 100644 (file)
index 0000000..111e7ca
--- /dev/null
@@ -0,0 +1,1736 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * sni_ave.c - Socionext UniPhier AVE ethernet driver
+ * Copyright 2014 Panasonic Corporation
+ * Copyright 2015-2017 Socionext Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+
+/* General Register Group */
+#define AVE_IDR                        0x000   /* ID */
+#define AVE_VR                 0x004   /* Version */
+#define AVE_GRR                        0x008   /* Global Reset */
+#define AVE_CFGR               0x00c   /* Configuration */
+
+/* Interrupt Register Group */
+#define AVE_GIMR               0x100   /* Global Interrupt Mask */
+#define AVE_GISR               0x104   /* Global Interrupt Status */
+
+/* MAC Register Group */
+#define AVE_TXCR               0x200   /* TX Setup */
+#define AVE_RXCR               0x204   /* RX Setup */
+#define AVE_RXMAC1R            0x208   /* MAC address (lower) */
+#define AVE_RXMAC2R            0x20c   /* MAC address (upper) */
+#define AVE_MDIOCTR            0x214   /* MDIO Control */
+#define AVE_MDIOAR             0x218   /* MDIO Address */
+#define AVE_MDIOWDR            0x21c   /* MDIO Data */
+#define AVE_MDIOSR             0x220   /* MDIO Status */
+#define AVE_MDIORDR            0x224   /* MDIO Rd Data */
+
+/* Descriptor Control Register Group */
+#define AVE_DESCC              0x300   /* Descriptor Control */
+#define AVE_TXDC               0x304   /* TX Descriptor Configuration */
+#define AVE_RXDC0              0x308   /* RX Descriptor Ring0 Configuration */
+#define AVE_IIRQC              0x34c   /* Interval IRQ Control */
+
+/* Packet Filter Register Group */
+#define AVE_PKTF_BASE          0x800   /* PF Base Address */
+#define AVE_PFMBYTE_BASE       0xd00   /* PF Mask Byte Base Address */
+#define AVE_PFMBIT_BASE                0xe00   /* PF Mask Bit Base Address */
+#define AVE_PFSEL_BASE         0xf00   /* PF Selector Base Address */
+#define AVE_PFEN               0xffc   /* Packet Filter Enable */
+#define AVE_PKTF(ent)          (AVE_PKTF_BASE + (ent) * 0x40)
+#define AVE_PFMBYTE(ent)       (AVE_PFMBYTE_BASE + (ent) * 8)
+#define AVE_PFMBIT(ent)                (AVE_PFMBIT_BASE + (ent) * 4)
+#define AVE_PFSEL(ent)         (AVE_PFSEL_BASE + (ent) * 4)
+
+/* 64bit descriptor memory */
+#define AVE_DESC_SIZE_64       12      /* Descriptor Size */
+
+#define AVE_TXDM_64            0x1000  /* Tx Descriptor Memory */
+#define AVE_RXDM_64            0x1c00  /* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_64       0x0ba0  /* Tx Descriptor Memory Size 3KB */
+#define AVE_RXDM_SIZE_64       0x6000  /* Rx Descriptor Memory Size 24KB */
+
+/* 32bit descriptor memory */
+#define AVE_DESC_SIZE_32       8       /* Descriptor Size */
+
+#define AVE_TXDM_32            0x1000  /* Tx Descriptor Memory */
+#define AVE_RXDM_32            0x1800  /* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_32       0x07c0  /* Tx Descriptor Memory Size 2KB */
+#define AVE_RXDM_SIZE_32       0x4000  /* Rx Descriptor Memory Size 16KB */
+
+/* RMII Bridge Register Group */
+#define AVE_RSTCTRL            0x8028  /* Reset control */
+#define AVE_RSTCTRL_RMIIRST    BIT(16)
+#define AVE_LINKSEL            0x8034  /* Link speed setting */
+#define AVE_LINKSEL_100M       BIT(0)
+
+/* AVE_GRR */
+#define AVE_GRR_RXFFR          BIT(5)  /* Reset RxFIFO */
+#define AVE_GRR_PHYRST         BIT(4)  /* Reset external PHY */
+#define AVE_GRR_GRST           BIT(0)  /* Reset all MAC */
+
+/* AVE_CFGR */
+#define AVE_CFGR_FLE           BIT(31) /* Filter Function */
+#define AVE_CFGR_CHE           BIT(30) /* Checksum Function */
+#define AVE_CFGR_MII           BIT(27) /* Func mode (1:MII/RMII, 0:RGMII) */
+#define AVE_CFGR_IPFCEN                BIT(24) /* IP fragment sum Enable */
+
+/* AVE_GISR (common with GIMR) */
+#define AVE_GI_PHY             BIT(24) /* PHY interrupt */
+#define AVE_GI_TX              BIT(16) /* Tx complete */
+#define AVE_GI_RXERR           BIT(8)  /* Receive frame more than max size */
+#define AVE_GI_RXOVF           BIT(7)  /* Overflow at the RxFIFO */
+#define AVE_GI_RXDROP          BIT(6)  /* Drop packet */
+#define AVE_GI_RXIINT          BIT(5)  /* Interval interrupt */
+
+/* AVE_TXCR */
+#define AVE_TXCR_FLOCTR                BIT(18) /* Flow control */
+#define AVE_TXCR_TXSPD_1G      BIT(17)
+#define AVE_TXCR_TXSPD_100     BIT(16)
+
+/* AVE_RXCR */
+#define AVE_RXCR_RXEN          BIT(30) /* Rx enable */
+#define AVE_RXCR_FDUPEN                BIT(22) /* Interface mode */
+#define AVE_RXCR_FLOCTR                BIT(21) /* Flow control */
+#define AVE_RXCR_AFEN          BIT(19) /* MAC address filter */
+#define AVE_RXCR_DRPEN         BIT(18) /* Drop pause frame */
+#define AVE_RXCR_MPSIZ_MASK    GENMASK(10, 0)
+
+/* AVE_MDIOCTR */
+#define AVE_MDIOCTR_RREQ       BIT(3)  /* Read request */
+#define AVE_MDIOCTR_WREQ       BIT(2)  /* Write request */
+
+/* AVE_MDIOSR */
+#define AVE_MDIOSR_STS         BIT(0)  /* access status */
+
+/* AVE_DESCC */
+#define AVE_DESCC_STATUS_MASK  GENMASK(31, 16)
+#define AVE_DESCC_RD0          BIT(8)  /* Enable Rx descriptor Ring0 */
+#define AVE_DESCC_RDSTP                BIT(4)  /* Pause Rx descriptor */
+#define AVE_DESCC_TD           BIT(0)  /* Enable Tx descriptor */
+
+/* AVE_TXDC */
+#define AVE_TXDC_SIZE          GENMASK(27, 16) /* Size of Tx descriptor */
+#define AVE_TXDC_ADDR          GENMASK(11, 0)  /* Start address */
+#define AVE_TXDC_ADDR_START    0
+
+/* AVE_RXDC0 */
+#define AVE_RXDC0_SIZE         GENMASK(30, 16) /* Size of Rx descriptor */
+#define AVE_RXDC0_ADDR         GENMASK(14, 0)  /* Start address */
+#define AVE_RXDC0_ADDR_START   0
+
+/* AVE_IIRQC */
+#define AVE_IIRQC_EN0          BIT(27) /* Enable interval interrupt Ring0 */
+#define AVE_IIRQC_BSCK         GENMASK(15, 0)  /* Interval count unit */
+
+/* Command status for descriptor */
+#define AVE_STS_OWN            BIT(31) /* Descriptor ownership */
+#define AVE_STS_INTR           BIT(29) /* Request for interrupt */
+#define AVE_STS_OK             BIT(27) /* Normal transmit */
+/* TX */
+#define AVE_STS_NOCSUM         BIT(28) /* No use HW checksum */
+#define AVE_STS_1ST            BIT(26) /* Head of buffer chain */
+#define AVE_STS_LAST           BIT(25) /* Tail of buffer chain */
+#define AVE_STS_OWC            BIT(21) /* Out of window,Late Collision */
+#define AVE_STS_EC             BIT(20) /* Excess collision occurred */
+#define AVE_STS_PKTLEN_TX_MASK GENMASK(15, 0)
+/* RX */
+#define AVE_STS_CSSV           BIT(21) /* Checksum check performed */
+#define AVE_STS_CSER           BIT(20) /* Checksum error detected */
+#define AVE_STS_PKTLEN_RX_MASK GENMASK(10, 0)
+
+/* Packet filter */
+#define AVE_PFMBYTE_MASK0      (GENMASK(31, 8) | GENMASK(5, 0))
+#define AVE_PFMBYTE_MASK1      GENMASK(25, 0)
+#define AVE_PFMBIT_MASK                GENMASK(15, 0)
+
+#define AVE_PF_SIZE            17      /* Number of all packet filter */
+#define AVE_PF_MULTICAST_SIZE  7       /* Number of multicast filter */
+
+#define AVE_PFNUM_FILTER       0       /* No.0 */
+#define AVE_PFNUM_UNICAST      1       /* No.1 */
+#define AVE_PFNUM_BROADCAST    2       /* No.2 */
+#define AVE_PFNUM_MULTICAST    11      /* No.11-17 */
+
+/* NETIF Message control */
+#define AVE_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV    |     \
+                                NETIF_MSG_PROBE  |     \
+                                NETIF_MSG_LINK   |     \
+                                NETIF_MSG_TIMER  |     \
+                                NETIF_MSG_IFDOWN |     \
+                                NETIF_MSG_IFUP   |     \
+                                NETIF_MSG_RX_ERR |     \
+                                NETIF_MSG_TX_ERR)
+
+/* Parameter for descriptor */
+#define AVE_NR_TXDESC          32      /* Tx descriptor */
+#define AVE_NR_RXDESC          64      /* Rx descriptor */
+
+/* Byte offsets of the fields within one hardware descriptor */
+#define AVE_DESC_OFS_CMDSTS    0
+#define AVE_DESC_OFS_ADDRL     4
+#define AVE_DESC_OFS_ADDRU     8
+
+/* Parameter for ethernet frame */
+#define AVE_MAX_ETHFRAME       1518    /* Rx buffer size (bytes) */
+
+/* Parameter for interrupt */
+#define AVE_INTM_COUNT         20
+#define AVE_FORCE_TXINTCNT     1
+
+/* True when the SoC uses the 64-bit descriptor format (see ave_soc_data) */
+#define IS_DESC_64BIT(p)       ((p)->data->is_desc_64bit)
+
+/* Selects which descriptor ring an ave_desc_* accessor operates on */
+enum desc_id {
+       AVE_DESCID_RX,
+       AVE_DESCID_TX,
+};
+
+/* Target states for ave_desc_switch() */
+enum desc_state {
+       AVE_DESC_RX_PERMIT,
+       AVE_DESC_RX_SUSPEND,
+       AVE_DESC_START,
+       AVE_DESC_STOP,
+};
+
+/* Software bookkeeping for one descriptor: the attached skb and its
+ * DMA mapping (bus address and mapped length).
+ */
+struct ave_desc {
+       struct sk_buff  *skbs;
+       dma_addr_t      skbs_dma;
+       size_t          skbs_dmalen;
+};
+
+/* Per-ring descriptor state; proc_idx/done_idx wrap modulo ndesc */
+struct ave_desc_info {
+       u32     ndesc;          /* number of descriptor */
+       u32     daddr;          /* start address of descriptor */
+       u32     proc_idx;       /* index of processing packet */
+       u32     done_idx;       /* index of processed packet */
+       struct ave_desc *desc;  /* skb info related descriptor */
+};
+
+/* Per-SoC configuration, selected at probe time */
+struct ave_soc_data {
+       bool    is_desc_64bit;  /* descriptors carry a 64-bit buffer address */
+};
+
+/* Statistics for one traffic direction; packets/bytes are published
+ * under syncp, the remaining counters are updated without it (from
+ * IRQ/NAPI context in the visible code paths).
+ */
+struct ave_stats {
+       struct  u64_stats_sync  syncp;
+       u64     packets;
+       u64     bytes;
+       u64     errors;
+       u64     dropped;
+       u64     collisions;
+       u64     fifo_errors;
+};
+
+/* Driver private data, embedded in the net_device (netdev_priv()) */
+struct ave_private {
+       void __iomem            *base;          /* mapped register window */
+       int                     irq;
+       int                     phy_id;
+       unsigned int            desc_size;      /* bytes per HW descriptor */
+       u32                     msg_enable;
+       struct clk              *clk;
+       struct reset_control    *rst;
+       phy_interface_t         phy_mode;
+       struct phy_device       *phydev;
+       struct mii_bus          *mdio;
+
+       /* stats */
+       struct ave_stats        stats_rx;
+       struct ave_stats        stats_tx;
+
+       /* NAPI support */
+       struct net_device       *ndev;
+       struct napi_struct      napi_rx;
+       struct napi_struct      napi_tx;
+
+       /* descriptor */
+       struct ave_desc_info    rx;
+       struct ave_desc_info    tx;
+
+       /* flow control */
+       int pause_auto;
+       int pause_rx;
+       int pause_tx;
+
+       const struct ave_soc_data *data;        /* SoC-specific parameters */
+};
+
+/* Read a 32-bit field of descriptor @entry from the Tx or Rx ring held
+ * in the controller's descriptor memory.
+ */
+static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
+                        int offset)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 ring_base;
+
+       if (id == AVE_DESCID_TX)
+               ring_base = priv->tx.daddr;
+       else
+               ring_base = priv->rx.daddr;
+
+       return readl(priv->base + ring_base + entry * priv->desc_size + offset);
+}
+
+/* Read the command/status word of descriptor @entry. */
+static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
+                               int entry)
+{
+       return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
+}
+
+/* Write a 32-bit field of descriptor @entry in the Tx or Rx ring held
+ * in the controller's descriptor memory.
+ */
+static void ave_desc_write(struct net_device *ndev, enum desc_id id,
+                          int entry, int offset, u32 val)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 ring_base;
+
+       if (id == AVE_DESCID_TX)
+               ring_base = priv->tx.daddr;
+       else
+               ring_base = priv->rx.daddr;
+
+       writel(val, priv->base + ring_base + entry * priv->desc_size + offset);
+}
+
+/* Write the command/status word of descriptor @entry. */
+static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
+                                 int entry, u32 val)
+{
+       ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
+}
+
+/* Store a buffer bus address in descriptor @entry; the upper half is
+ * written only on SoCs that use 64-bit descriptors.
+ */
+static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
+                               int entry, dma_addr_t paddr)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 lsb = lower_32_bits(paddr);
+
+       ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL, lsb);
+       if (IS_DESC_64BIT(priv))
+               ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRU,
+                              upper_32_bits(paddr));
+}
+
+/* Mask every interrupt source and return the mask that was in effect,
+ * so the caller can hand it back to ave_irq_restore().
+ */
+static u32 ave_irq_disable_all(struct net_device *ndev)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 saved_mask;
+
+       saved_mask = readl(priv->base + AVE_GIMR);
+       writel(0, priv->base + AVE_GIMR);
+
+       return saved_mask;
+}
+
+/* Restore an interrupt mask previously saved by ave_irq_disable_all(). */
+static void ave_irq_restore(struct net_device *ndev, u32 val)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+
+       writel(val, priv->base + AVE_GIMR);
+}
+
+/* Unmask the interrupt sources in @bitflag and write the same bits to
+ * the status register (as done elsewhere in this driver to negate
+ * pending status).
+ */
+static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 gimr;
+
+       gimr = readl(priv->base + AVE_GIMR);
+       writel(gimr | bitflag, priv->base + AVE_GIMR);
+       writel(bitflag, priv->base + AVE_GISR);
+}
+
+/* Program a MAC address into a register pair: @reg1 takes bytes 0-3,
+ * @reg2 takes bytes 4-5 (byte 0 in the least significant position).
+ */
+static void ave_hw_write_macaddr(struct net_device *ndev,
+                                const unsigned char *mac_addr,
+                                int reg1, int reg2)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+
+       /* Cast byte 3 to u32 before shifting by 24: left-shifting a
+        * promoted int value >= 0x80 into the sign bit is undefined
+        * behavior.
+        */
+       writel(mac_addr[0] | mac_addr[1] << 8 | mac_addr[2] << 16 |
+              (u32)mac_addr[3] << 24, priv->base + reg1);
+       writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
+}
+
+/* Format the hardware version register as "vMAJOR.MINOR" into @buf. */
+static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 vr;
+
+       vr = readl(priv->base + AVE_VR);
+       snprintf(buf, len, "v%u.%u", (vr >> 8) & 0xff, vr & 0xff);
+}
+
+/* ethtool get_drvinfo: driver name, bus info, and IP version string. */
+static void ave_ethtool_get_drvinfo(struct net_device *ndev,
+                                   struct ethtool_drvinfo *info)
+{
+       struct device *parent = ndev->dev.parent;
+
+       strlcpy(info->driver, parent->driver->name, sizeof(info->driver));
+       strlcpy(info->bus_info, dev_name(parent), sizeof(info->bus_info));
+       ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
+}
+
+/* ethtool get_msglevel: report the current netif message level. */
+static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
+{
+       struct ave_private *ave = netdev_priv(ndev);
+
+       return ave->msg_enable;
+}
+
+/* ethtool set_msglevel: update the netif message level. */
+static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
+{
+       struct ave_private *ave = netdev_priv(ndev);
+
+       ave->msg_enable = val;
+}
+
+/* ethtool get_wol: Wake-on-LAN is delegated to the PHY; report nothing
+ * when no PHY is attached.
+ */
+static void ave_ethtool_get_wol(struct net_device *ndev,
+                               struct ethtool_wolinfo *wol)
+{
+       struct phy_device *phydev = ndev->phydev;
+
+       wol->supported = 0;
+       wol->wolopts   = 0;
+
+       if (!phydev)
+               return;
+
+       phy_ethtool_get_wol(phydev, wol);
+}
+
+/* ethtool set_wol: forward the request to the PHY and track the wakeup
+ * state on the net device. WAKE_ARP and WAKE_MAGICSECURE are rejected.
+ */
+static int ave_ethtool_set_wol(struct net_device *ndev,
+                              struct ethtool_wolinfo *wol)
+{
+       struct phy_device *phydev = ndev->phydev;
+       int err;
+
+       if (!phydev)
+               return -EOPNOTSUPP;
+       if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+               return -EOPNOTSUPP;
+
+       err = phy_ethtool_set_wol(phydev, wol);
+       if (!err)
+               device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
+
+       return err;
+}
+
+/* ethtool get_pauseparam: report the flow-control settings last
+ * requested via set_pauseparam.
+ */
+static void ave_ethtool_get_pauseparam(struct net_device *ndev,
+                                      struct ethtool_pauseparam *pause)
+{
+       struct ave_private *ave = netdev_priv(ndev);
+
+       pause->autoneg  = ave->pause_auto;
+       pause->rx_pause = ave->pause_rx;
+       pause->tx_pause = ave->pause_tx;
+}
+
+/* ethtool set_pauseparam: record the requested flow-control settings,
+ * translate them into PHY pause advertisement bits, and restart
+ * autonegotiation when autoneg is requested on a running interface.
+ */
+static int ave_ethtool_set_pauseparam(struct net_device *ndev,
+                                     struct ethtool_pauseparam *pause)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       struct phy_device *phydev = ndev->phydev;
+
+       if (!phydev)
+               return -EINVAL;
+
+       priv->pause_auto = pause->autoneg;
+       priv->pause_rx   = pause->rx_pause;
+       priv->pause_tx   = pause->tx_pause;
+
+       /* Resulting advertisement:
+        *   rx only -> Pause | Asym_Pause
+        *   rx + tx -> Pause
+        *   tx only -> Asym_Pause
+        * so the XOR below is intentional, not a typo.
+        */
+       phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+       if (pause->rx_pause)
+               phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+       if (pause->tx_pause)
+               phydev->advertising ^= ADVERTISED_Asym_Pause;
+
+       if (pause->autoneg) {
+               if (netif_running(ndev))
+                       phy_start_aneg(phydev);
+       }
+
+       return 0;
+}
+
+/* ethtool operations; link settings, nway reset and link state are
+ * delegated to the generic phylib helpers.
+ */
+static const struct ethtool_ops ave_ethtool_ops = {
+       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings     = phy_ethtool_set_link_ksettings,
+       .get_drvinfo            = ave_ethtool_get_drvinfo,
+       .nway_reset             = phy_ethtool_nway_reset,
+       .get_link               = ethtool_op_get_link,
+       .get_msglevel           = ave_ethtool_get_msglevel,
+       .set_msglevel           = ave_ethtool_set_msglevel,
+       .get_wol                = ave_ethtool_get_wol,
+       .set_wol                = ave_ethtool_set_wol,
+       .get_pauseparam         = ave_ethtool_get_pauseparam,
+       .set_pauseparam         = ave_ethtool_set_pauseparam,
+};
+
+/* MDIO bus read: program the target address, trigger a read request and
+ * poll the status register for completion. Returns the 16-bit register
+ * value or a negative errno on timeout.
+ */
+static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
+{
+       struct net_device *ndev = bus->priv;
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 ctrl, status;
+       int err;
+
+       /* target address */
+       writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+       /* issue the read request, making sure no write is pending */
+       ctrl = readl(priv->base + AVE_MDIOCTR);
+       ctrl |= AVE_MDIOCTR_RREQ;
+       ctrl &= ~AVE_MDIOCTR_WREQ;
+       writel(ctrl, priv->base + AVE_MDIOCTR);
+
+       err = readl_poll_timeout(priv->base + AVE_MDIOSR, status,
+                                !(status & AVE_MDIOSR_STS), 20, 2000);
+       if (err) {
+               netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
+                          phyid, regnum);
+               return err;
+       }
+
+       return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
+}
+
+/* MDIO bus write: program the target address and data, trigger a write
+ * request and poll the status register for completion. Returns 0 or a
+ * negative errno on timeout.
+ */
+static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
+                            u16 val)
+{
+       struct net_device *ndev = bus->priv;
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 ctrl, status;
+       int err;
+
+       /* target address */
+       writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+       /* data to write */
+       writel(val, priv->base + AVE_MDIOWDR);
+
+       /* issue the write request, making sure no read is pending */
+       ctrl = readl(priv->base + AVE_MDIOCTR);
+       ctrl |= AVE_MDIOCTR_WREQ;
+       ctrl &= ~AVE_MDIOCTR_RREQ;
+       writel(ctrl, priv->base + AVE_MDIOCTR);
+
+       err = readl_poll_timeout(priv->base + AVE_MDIOSR, status,
+                                !(status & AVE_MDIOSR_STS), 20, 2000);
+       if (err)
+               netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
+                          phyid, regnum);
+
+       return err;
+}
+
+/* Map @ptr/@len for streaming DMA and record the mapping in @desc.
+ * On success the bus address is stored through @paddr and 0 is
+ * returned; -ENOMEM is returned when the mapping fails.
+ */
+static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
+                      void *ptr, size_t len, enum dma_data_direction dir,
+                      dma_addr_t *paddr)
+{
+       struct device *hdev = ndev->dev.parent;
+       dma_addr_t busaddr;
+
+       busaddr = dma_map_single(hdev, ptr, len, dir);
+       if (unlikely(dma_mapping_error(hdev, busaddr)))
+               return -ENOMEM;
+
+       desc->skbs_dma = busaddr;
+       desc->skbs_dmalen = len;
+       *paddr = busaddr;
+
+       return 0;
+}
+
+/* Release the DMA mapping recorded by ave_dma_map(); a zero skbs_dma
+ * means nothing is mapped and the call is a no-op.
+ */
+static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
+                         enum dma_data_direction dir)
+{
+       if (!desc->skbs_dma)
+               return;
+
+       dma_unmap_single(ndev->dev.parent, desc->skbs_dma,
+                        desc->skbs_dmalen, dir);
+       desc->skbs_dma = 0;
+}
+
+/* Prepare Rx descriptor @entry: reuse the attached skb or allocate a
+ * fresh one, map it for DMA, and hand the descriptor back to hardware.
+ * Returns 0 on success or a negative errno; on failure the descriptor
+ * keeps no reference to the freed skb.
+ */
+static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       struct sk_buff *skb;
+       dma_addr_t paddr;
+       int ret;
+
+       skb = priv->rx.desc[entry].skbs;
+       if (!skb) {
+               skb = netdev_alloc_skb_ip_align(ndev,
+                                               AVE_MAX_ETHFRAME);
+               if (!skb) {
+                       netdev_err(ndev, "can't allocate skb for Rx\n");
+                       return -ENOMEM;
+               }
+       }
+
+       /* set disable to cmdsts */
+       ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+                             AVE_STS_INTR | AVE_STS_OWN);
+
+       /* map Rx buffer
+        * Rx buffer set to the Rx descriptor has two restrictions:
+        * - Rx buffer address is 4 byte aligned.
+        * - Rx buffer begins with 2 byte headroom, and data will be put from
+        *   (buffer + 2).
+        * To satisfy this, specify the address to put back the buffer
+        * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(),
+        * and expand the map size by NET_IP_ALIGN.
+        */
+       ret = ave_dma_map(ndev, &priv->rx.desc[entry],
+                         skb->data - NET_IP_ALIGN,
+                         AVE_MAX_ETHFRAME + NET_IP_ALIGN,
+                         DMA_FROM_DEVICE, &paddr);
+       if (ret) {
+               netdev_err(ndev, "can't map skb for Rx\n");
+               /* Drop the stored reference before freeing: if the skb was
+                * reused from this descriptor, leaving the stale pointer in
+                * place would cause a use-after-free on the next call.
+                */
+               priv->rx.desc[entry].skbs = NULL;
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+       priv->rx.desc[entry].skbs = skb;
+
+       /* set buffer pointer */
+       ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);
+
+       /* set enable to cmdsts */
+       ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+                             AVE_STS_INTR | AVE_MAX_ETHFRAME);
+
+       return ret;
+}
+
+/* Switch the descriptor ring controller between operating states.
+ *
+ * AVE_DESC_START enables Tx and Rx ring 0; AVE_DESC_STOP disables the
+ * controller and polls until it reads back idle; AVE_DESC_RX_SUSPEND
+ * requests an Rx stop and polls for its acknowledge bit (RDSTP << 16);
+ * AVE_DESC_RX_PERMIT clears the Rx stop request.
+ * Returns 0 on success, -EBUSY on poll timeout, -EINVAL otherwise.
+ */
+static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       int ret = 0;
+       u32 val;
+
+       switch (state) {
+       case AVE_DESC_START:
+               writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
+               break;
+
+       case AVE_DESC_STOP:
+               writel(0, priv->base + AVE_DESCC);
+               if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
+                                      150, 15000)) {
+                       netdev_err(ndev, "can't stop descriptor\n");
+                       ret = -EBUSY;
+               }
+               break;
+
+       case AVE_DESC_RX_SUSPEND:
+               val = readl(priv->base + AVE_DESCC);
+               val |= AVE_DESCC_RDSTP;
+               /* don't write the read-back status bits back */
+               val &= ~AVE_DESCC_STATUS_MASK;
+               writel(val, priv->base + AVE_DESCC);
+               if (readl_poll_timeout(priv->base + AVE_DESCC, val,
+                                      val & (AVE_DESCC_RDSTP << 16),
+                                      150, 150000)) {
+                       netdev_err(ndev, "can't suspend descriptor\n");
+                       ret = -EBUSY;
+               }
+               break;
+
+       case AVE_DESC_RX_PERMIT:
+               val = readl(priv->base + AVE_DESCC);
+               val &= ~AVE_DESCC_RDSTP;
+               val &= ~AVE_DESCC_STATUS_MASK;
+               writel(val, priv->base + AVE_DESCC);
+               break;
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Reclaim completed Tx descriptors.
+ *
+ * Walks the ring from done_idx towards proc_idx, stopping at the first
+ * descriptor still owned by hardware (AVE_STS_OWN set). Updates the Tx
+ * statistics, unmaps and frees the transmitted skbs, and wakes the Tx
+ * queue if buffers were released while it was stopped.
+ * Returns the number of freed buffers.
+ */
+static int ave_tx_complete(struct net_device *ndev)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 proc_idx, done_idx, ndesc, cmdsts;
+       unsigned int nr_freebuf = 0;
+       unsigned int tx_packets = 0;
+       unsigned int tx_bytes = 0;
+
+       proc_idx = priv->tx.proc_idx;
+       done_idx = priv->tx.done_idx;
+       ndesc    = priv->tx.ndesc;
+
+       /* free pre-stored skb from done_idx to proc_idx */
+       while (proc_idx != done_idx) {
+               cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);
+
+               /* do nothing if owner is HW (==1 for Tx) */
+               if (cmdsts & AVE_STS_OWN)
+                       break;
+
+               /* check Tx status and updates statistics */
+               if (cmdsts & AVE_STS_OK) {
+                       tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
+                       /* success */
+                       if (cmdsts & AVE_STS_LAST)
+                               tx_packets++;
+               } else {
+                       /* error; count it once per frame, on the tail */
+                       if (cmdsts & AVE_STS_LAST) {
+                               priv->stats_tx.errors++;
+                               if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
+                                       priv->stats_tx.collisions++;
+                       }
+               }
+
+               /* release skb */
+               if (priv->tx.desc[done_idx].skbs) {
+                       ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
+                                     DMA_TO_DEVICE);
+                       dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
+                       priv->tx.desc[done_idx].skbs = NULL;
+                       nr_freebuf++;
+               }
+               done_idx = (done_idx + 1) % ndesc;
+       }
+
+       priv->tx.done_idx = done_idx;
+
+       /* update stats */
+       u64_stats_update_begin(&priv->stats_tx.syncp);
+       priv->stats_tx.packets += tx_packets;
+       priv->stats_tx.bytes   += tx_bytes;
+       u64_stats_update_end(&priv->stats_tx.syncp);
+
+       /* wake queue for freeing buffer */
+       if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
+               netif_wake_queue(ndev);
+
+       return nr_freebuf;
+}
+
+/* Receive up to @num packets from the Rx ring.
+ *
+ * Processes descriptors that software owns (AVE_STS_OWN set for Rx),
+ * hands good frames to the stack, counts bad ones, then refills the
+ * consumed descriptors with fresh buffers.
+ * Returns the number of descriptors processed, including errored ones.
+ */
+static int ave_rx_receive(struct net_device *ndev, int num)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       unsigned int rx_packets = 0;
+       unsigned int rx_bytes = 0;
+       u32 proc_idx, done_idx;
+       struct sk_buff *skb;
+       unsigned int pktlen;
+       int restpkt, npkts;
+       u32 ndesc, cmdsts;
+
+       proc_idx = priv->rx.proc_idx;
+       done_idx = priv->rx.done_idx;
+       ndesc    = priv->rx.ndesc;
+       /* descriptors available ahead of the refill index, keeping one
+        * slot in reserve so proc_idx never catches up with done_idx
+        */
+       restpkt  = ((proc_idx + ndesc - 1) - done_idx) % ndesc;
+
+       for (npkts = 0; npkts < num; npkts++) {
+               /* we can't receive more packet, so fill desc quickly */
+               if (--restpkt < 0)
+                       break;
+
+               cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);
+
+               /* do nothing if owner is HW (==0 for Rx) */
+               if (!(cmdsts & AVE_STS_OWN))
+                       break;
+
+               if (!(cmdsts & AVE_STS_OK)) {
+                       priv->stats_rx.errors++;
+                       proc_idx = (proc_idx + 1) % ndesc;
+                       continue;
+               }
+
+               pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;
+
+               /* get skbuff for rx */
+               skb = priv->rx.desc[proc_idx].skbs;
+               priv->rx.desc[proc_idx].skbs = NULL;
+
+               ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);
+
+               skb->dev = ndev;
+               skb_put(skb, pktlen);
+               skb->protocol = eth_type_trans(skb, ndev);
+
+               /* trust the HW checksum only when it ran and found no error */
+               if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               rx_packets++;
+               rx_bytes += pktlen;
+
+               netif_receive_skb(skb);
+
+               proc_idx = (proc_idx + 1) % ndesc;
+       }
+
+       priv->rx.proc_idx = proc_idx;
+
+       /* update stats */
+       u64_stats_update_begin(&priv->stats_rx.syncp);
+       priv->stats_rx.packets += rx_packets;
+       priv->stats_rx.bytes   += rx_bytes;
+       u64_stats_update_end(&priv->stats_rx.syncp);
+
+       /* refill the Rx buffers */
+       while (proc_idx != done_idx) {
+               if (ave_rxdesc_prepare(ndev, done_idx))
+                       break;
+               done_idx = (done_idx + 1) % ndesc;
+       }
+
+       priv->rx.done_idx = done_idx;
+
+       return npkts;
+}
+
+/* NAPI poll for the Rx ring: receive up to @budget packets; when the
+ * budget is not exhausted, finish polling and re-enable the Rx
+ * interval interrupt.
+ */
+static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+       struct ave_private *priv = container_of(napi, struct ave_private,
+                                               napi_rx);
+       struct net_device *ndev = priv->ndev;
+       int done;
+
+       done = ave_rx_receive(ndev, budget);
+       if (done < budget) {
+               napi_complete_done(napi, done);
+               ave_irq_enable(ndev, AVE_GI_RXIINT);
+       }
+
+       return done;
+}
+
+/* NAPI poll for Tx completion: reclaim finished buffers, then finish
+ * polling and re-enable the Tx interrupt.
+ */
+static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
+{
+       struct ave_private *priv = container_of(napi, struct ave_private,
+                                               napi_tx);
+       struct net_device *ndev = priv->ndev;
+       int done;
+
+       done = ave_tx_complete(ndev);
+       napi_complete(napi);
+       ave_irq_enable(ndev, AVE_GI_TX);
+
+       return done;
+}
+
+/* Full controller reset: program the operating mode, cycle the RMII
+ * reset, pulse the global and PHY reset lines with the required
+ * delays, and leave all interrupts disabled.
+ */
+static void ave_global_reset(struct net_device *ndev)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 val;
+
+       /* set config register */
+       val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
+       if (!phy_interface_mode_is_rgmii(priv->phy_mode))
+               val |= AVE_CFGR_MII;
+       writel(val, priv->base + AVE_CFGR);
+
+       /* reset RMII register */
+       val = readl(priv->base + AVE_RSTCTRL);
+       val &= ~AVE_RSTCTRL_RMIIRST;
+       writel(val, priv->base + AVE_RSTCTRL);
+
+       /* assert reset */
+       writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
+       msleep(20);
+
+       /* 1st, negate PHY reset only */
+       writel(AVE_GRR_GRST, priv->base + AVE_GRR);
+       msleep(40);
+
+       /* negate reset */
+       writel(0, priv->base + AVE_GRR);
+       msleep(40);
+
+       /* negate RMII register */
+       val = readl(priv->base + AVE_RSTCTRL);
+       val |= AVE_RSTCTRL_RMIIRST;
+       writel(val, priv->base + AVE_RSTCTRL);
+
+       /* leave all interrupt sources masked after reset */
+       ave_irq_disable_all(ndev);
+}
+
+/* Recover from an Rx FIFO overflow: with the MAC receiver disabled and
+ * the Rx descriptor ring suspended, drain pending packets, pulse the
+ * Rx FIFO reset, clear the overflow status, and restore the previous
+ * Rx state.
+ */
+static void ave_rxfifo_reset(struct net_device *ndev)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 rxcr_org;
+
+       /* save and disable MAC receive op */
+       rxcr_org = readl(priv->base + AVE_RXCR);
+       writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);
+
+       /* suspend Rx descriptor */
+       ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);
+
+       /* receive all packets before descriptor starts */
+       ave_rx_receive(ndev, priv->rx.ndesc);
+
+       /* assert reset */
+       writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
+       usleep_range(40, 50);
+
+       /* negate reset */
+       writel(0, priv->base + AVE_GRR);
+       usleep_range(10, 20);
+
+       /* negate interrupt status */
+       writel(AVE_GI_RXOVF, priv->base + AVE_GISR);
+
+       /* permit descriptor */
+       ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);
+
+       /* restore MAC receive op */
+       writel(rxcr_org, priv->base + AVE_RXCR);
+}
+
+/* Top-level interrupt handler.
+ *
+ * Masks all interrupts on entry, acknowledges PHY and Rx-error events
+ * unconditionally, then handles only the events that were both raised
+ * and enabled. Rx/Tx completion is deferred to NAPI with its source
+ * kept masked until the corresponding poll routine re-enables it; the
+ * (possibly reduced) mask is restored on exit.
+ */
+static irqreturn_t ave_irq_handler(int irq, void *netdev)
+{
+       struct net_device *ndev = (struct net_device *)netdev;
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 gimr_val, gisr_val;
+
+       gimr_val = ave_irq_disable_all(ndev);
+
+       /* get interrupt status */
+       gisr_val = readl(priv->base + AVE_GISR);
+
+       /* PHY */
+       if (gisr_val & AVE_GI_PHY)
+               writel(AVE_GI_PHY, priv->base + AVE_GISR);
+
+       /* check exceeding packet */
+       if (gisr_val & AVE_GI_RXERR) {
+               writel(AVE_GI_RXERR, priv->base + AVE_GISR);
+               netdev_err(ndev, "receive a packet exceeding frame buffer\n");
+       }
+
+       /* only act on sources that were actually enabled */
+       gisr_val &= gimr_val;
+       if (!gisr_val)
+               goto exit_isr;
+
+       /* RxFIFO overflow */
+       if (gisr_val & AVE_GI_RXOVF) {
+               priv->stats_rx.fifo_errors++;
+               ave_rxfifo_reset(ndev);
+               goto exit_isr;
+       }
+
+       /* Rx drop */
+       if (gisr_val & AVE_GI_RXDROP) {
+               priv->stats_rx.dropped++;
+               writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
+       }
+
+       /* Rx interval */
+       if (gisr_val & AVE_GI_RXIINT) {
+               napi_schedule(&priv->napi_rx);
+               /* still force to disable Rx interrupt until NAPI finishes */
+               gimr_val &= ~AVE_GI_RXIINT;
+       }
+
+       /* Tx completed */
+       if (gisr_val & AVE_GI_TX) {
+               napi_schedule(&priv->napi_tx);
+               /* still force to disable Tx interrupt until NAPI finishes */
+               gimr_val &= ~AVE_GI_TX;
+       }
+
+exit_isr:
+       ave_irq_restore(ndev, gimr_val);
+
+       return IRQ_HANDLED;
+}
+
+/* Enable packet filter @entry; WARNs and returns -EINVAL when the
+ * entry index is out of range.
+ */
+static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 pfen;
+
+       if (WARN_ON(entry > AVE_PF_SIZE))
+               return -EINVAL;
+
+       pfen = readl(priv->base + AVE_PFEN);
+       writel(pfen | BIT(entry), priv->base + AVE_PFEN);
+
+       return 0;
+}
+
+/* Disable packet filter @entry; WARNs and returns -EINVAL when the
+ * entry index is out of range.
+ */
+static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 pfen;
+
+       if (WARN_ON(entry > AVE_PF_SIZE))
+               return -EINVAL;
+
+       pfen = readl(priv->base + AVE_PFEN);
+       writel(pfen & ~BIT(entry), priv->base + AVE_PFEN);
+
+       return 0;
+}
+
+/* Program packet filter @entry to match the first @set_size bytes of
+ * @mac_addr and steer hits to Rx ring 0. The filter is disabled while
+ * it is reprogrammed and re-enabled afterwards.
+ */
+static int ave_pfsel_set_macaddr(struct net_device *ndev,
+                                unsigned int entry,
+                                const unsigned char *mac_addr,
+                                unsigned int set_size)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+
+       if (WARN_ON(entry > AVE_PF_SIZE))
+               return -EINVAL;
+       if (WARN_ON(set_size > 6))
+               return -EINVAL;
+
+       ave_pfsel_stop(ndev, entry);
+
+       /* set MAC address for the filter */
+       ave_hw_write_macaddr(ndev, mac_addr,
+                            AVE_PKTF(entry), AVE_PKTF(entry) + 4);
+
+       /* set byte mask: mask off all bytes above set_size; cleared bits
+        * appear to select bytes that must match (the promiscuous filter
+        * sets every maskable bit and matches everything)
+        */
+       writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
+              priv->base + AVE_PFMBYTE(entry));
+       writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+       /* set bit mask filter */
+       writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+       /* set selector to ring 0 */
+       writel(0, priv->base + AVE_PFSEL(entry));
+
+       /* restart filter */
+       ave_pfsel_start(ndev, entry);
+
+       return 0;
+}
+
+/* Program packet filter @entry as a match-everything (promiscuous)
+ * filter that steers traffic to @rxring. All mask bits are set, so no
+ * byte comparison is performed.
+ */
+static void ave_pfsel_set_promisc(struct net_device *ndev,
+                                 unsigned int entry, u32 rxring)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+
+       if (WARN_ON(entry > AVE_PF_SIZE))
+               return;
+
+       ave_pfsel_stop(ndev, entry);
+
+       /* set byte mask */
+       writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
+       writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+       /* set bit mask filter */
+       writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+       /* set selector to rxring */
+       writel(rxring, priv->base + AVE_PFSEL(entry));
+
+       ave_pfsel_start(ndev, entry);
+}
+
+/* Disable every packet filter, then install the default entries:
+ * a promiscuous filter on ring 0, the device's unicast address, and
+ * the broadcast address.
+ */
+static void ave_pfsel_init(struct net_device *ndev)
+{
+       unsigned char bcast[ETH_ALEN];
+       int entry;
+
+       eth_broadcast_addr(bcast);
+
+       for (entry = 0; entry < AVE_PF_SIZE; entry++)
+               ave_pfsel_stop(ndev, entry);
+
+       /* promiscuous entry, select ring 0 */
+       ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
+
+       /* unicast entry */
+       ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+
+       /* broadcast entry */
+       ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast, 6);
+}
+
+/* phylib adjust_link callback: program MAC speed, duplex and flow
+ * control to match the PHY's resolved link parameters. When RXCR must
+ * change, the Rx MAC is disabled first and re-enabled with the new
+ * value.
+ */
+static void ave_phy_adjust_link(struct net_device *ndev)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       struct phy_device *phydev = ndev->phydev;
+       u32 val, txcr, rxcr, rxcr_org;
+       u16 rmt_adv = 0, lcl_adv = 0;
+       u8 cap;
+
+       /* set RGMII speed */
+       val = readl(priv->base + AVE_TXCR);
+       val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);
+
+       if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
+               val |= AVE_TXCR_TXSPD_1G;
+       else if (phydev->speed == SPEED_100)
+               val |= AVE_TXCR_TXSPD_100;
+
+       writel(val, priv->base + AVE_TXCR);
+
+       /* set RMII speed (100M/10M only) */
+       if (!phy_interface_is_rgmii(phydev)) {
+               val = readl(priv->base + AVE_LINKSEL);
+               if (phydev->speed == SPEED_10)
+                       val &= ~AVE_LINKSEL_100M;
+               else
+                       val |= AVE_LINKSEL_100M;
+               writel(val, priv->base + AVE_LINKSEL);
+       }
+
+       /* check current RXCR/TXCR */
+       rxcr = readl(priv->base + AVE_RXCR);
+       txcr = readl(priv->base + AVE_TXCR);
+       rxcr_org = rxcr;
+
+       if (phydev->duplex) {
+               rxcr |= AVE_RXCR_FDUPEN;
+
+               /* resolve pause capabilities from local and link-partner
+                * advertisement (IEEE 802.3 flow-control resolution)
+                */
+               if (phydev->pause)
+                       rmt_adv |= LPA_PAUSE_CAP;
+               if (phydev->asym_pause)
+                       rmt_adv |= LPA_PAUSE_ASYM;
+               if (phydev->advertising & ADVERTISED_Pause)
+                       lcl_adv |= ADVERTISE_PAUSE_CAP;
+               if (phydev->advertising & ADVERTISED_Asym_Pause)
+                       lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+               cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+               if (cap & FLOW_CTRL_TX)
+                       txcr |= AVE_TXCR_FLOCTR;
+               else
+                       txcr &= ~AVE_TXCR_FLOCTR;
+               if (cap & FLOW_CTRL_RX)
+                       rxcr |= AVE_RXCR_FLOCTR;
+               else
+                       rxcr &= ~AVE_RXCR_FLOCTR;
+       } else {
+               /* half duplex: no flow control */
+               rxcr &= ~AVE_RXCR_FDUPEN;
+               rxcr &= ~AVE_RXCR_FLOCTR;
+               txcr &= ~AVE_TXCR_FLOCTR;
+       }
+
+       if (rxcr_org != rxcr) {
+               /* disable Rx mac */
+               writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
+               /* change and enable TX/Rx mac */
+               writel(txcr, priv->base + AVE_TXCR);
+               writel(rxcr, priv->base + AVE_RXCR);
+       }
+
+       phy_print_status(phydev);
+}
+
+/* Apply ndev->dev_addr to the Rx MAC address registers and refresh the
+ * unicast packet filter entry to match it.
+ */
+static void ave_macaddr_init(struct net_device *ndev)
+{
+       ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);
+
+       /* pfsel unicast entry */
+       ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+}
+
+/* ndo_init callback: enable clock and release reset, reset the
+ * controller, register the MDIO bus from the "mdio" DT subnode, and
+ * attach the PHY. On failure, resources are unwound in reverse order;
+ * ave_uninit() is the counterpart for the success path.
+ */
+static int ave_init(struct net_device *ndev)
+{
+       struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+       struct ave_private *priv = netdev_priv(ndev);
+       struct device *dev = ndev->dev.parent;
+       struct device_node *np = dev->of_node;
+       struct device_node *mdio_np;
+       struct phy_device *phydev;
+       int ret;
+
+       /* enable clk because of hw access until ndo_open */
+       ret = clk_prepare_enable(priv->clk);
+       if (ret) {
+               dev_err(dev, "can't enable clock\n");
+               return ret;
+       }
+       ret = reset_control_deassert(priv->rst);
+       if (ret) {
+               dev_err(dev, "can't deassert reset\n");
+               goto out_clk_disable;
+       }
+
+       ave_global_reset(ndev);
+
+       mdio_np = of_get_child_by_name(np, "mdio");
+       if (!mdio_np) {
+               dev_err(dev, "mdio node not found\n");
+               ret = -EINVAL;
+               goto out_reset_assert;
+       }
+       ret = of_mdiobus_register(priv->mdio, mdio_np);
+       of_node_put(mdio_np);
+       if (ret) {
+               dev_err(dev, "failed to register mdiobus\n");
+               goto out_reset_assert;
+       }
+
+       phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
+       if (!phydev) {
+               dev_err(dev, "could not attach to PHY\n");
+               ret = -ENODEV;
+               goto out_mdio_unregister;
+       }
+
+       priv->phydev = phydev;
+
+       /* mark the device wakeup-capable if the PHY supports WoL */
+       phy_ethtool_get_wol(phydev, &wol);
+       device_set_wakeup_capable(&ndev->dev, !!wol.supported);
+
+       /* non-RGMII modes are limited to 10/100 */
+       if (!phy_interface_is_rgmii(phydev)) {
+               phydev->supported &= ~PHY_GBIT_FEATURES;
+               phydev->supported |= PHY_BASIC_FEATURES;
+       }
+       phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+       phy_attached_info(phydev);
+
+       return 0;
+
+out_mdio_unregister:
+       mdiobus_unregister(priv->mdio);
+out_reset_assert:
+       reset_control_assert(priv->rst);
+out_clk_disable:
+       clk_disable_unprepare(priv->clk);
+
+       return ret;
+}
+
+/* ndo_uninit callback: inverse of ave_init().
+ *
+ * Detaches the PHY, unregisters the MDIO bus, then asserts reset and
+ * gates the clock since no further register access will happen.
+ */
+static void ave_uninit(struct net_device *ndev)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+
+       phy_disconnect(priv->phydev);
+       mdiobus_unregister(priv->mdio);
+
+       /* disable clk because of hw access after ndo_stop */
+       reset_control_assert(priv->rst);
+       clk_disable_unprepare(priv->clk);
+}
+
+/* ndo_open callback: allocate descriptor rings and start the datapath.
+ *
+ * Requests the (shared) interrupt, allocates the Tx/Rx software
+ * descriptor arrays, initializes both hardware rings, programs Rx/Tx MAC
+ * configuration and the interrupt pacing timer, enables NAPI, and starts
+ * the PHY and the Tx queue.
+ */
+static int ave_open(struct net_device *ndev)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       int entry;
+       int ret;
+       u32 val;
+
+       ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
+                         ndev);
+       if (ret)
+               return ret;
+
+       priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
+                               GFP_KERNEL);
+       if (!priv->tx.desc) {
+               ret = -ENOMEM;
+               goto out_free_irq;
+       }
+
+       priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
+                               GFP_KERNEL);
+       if (!priv->rx.desc) {
+               kfree(priv->tx.desc);
+               ret = -ENOMEM;
+               goto out_free_irq;
+       }
+
+       /* initialize Tx work and descriptor */
+       priv->tx.proc_idx = 0;
+       priv->tx.done_idx = 0;
+       for (entry = 0; entry < priv->tx.ndesc; entry++) {
+               ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
+               ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
+       }
+       writel(AVE_TXDC_ADDR_START |
+              (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
+              priv->base + AVE_TXDC);
+
+       /* initialize Rx work and descriptor */
+       priv->rx.proc_idx = 0;
+       priv->rx.done_idx = 0;
+       for (entry = 0; entry < priv->rx.ndesc; entry++) {
+               if (ave_rxdesc_prepare(ndev, entry))
+                       break;
+       }
+       writel(AVE_RXDC0_ADDR_START |
+              (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
+              priv->base + AVE_RXDC0);
+
+       ave_desc_switch(ndev, AVE_DESC_START);
+
+       ave_pfsel_init(ndev);
+       ave_macaddr_init(ndev);
+
+       /* set Rx configuration */
+       /* full duplex, enable pause drop, enable flow control */
+       val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
+               AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
+       writel(val, priv->base + AVE_RXCR);
+
+       /* set Tx configuration */
+       /* enable flow control, disable loopback */
+       writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);
+
+       /* enable timer, clear EN,INTM, and mask interval unit(BSCK) */
+       val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
+       val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
+       writel(val, priv->base + AVE_IIRQC);
+
+       val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX;
+       ave_irq_restore(ndev, val);
+
+       napi_enable(&priv->napi_rx);
+       napi_enable(&priv->napi_tx);
+
+       phy_start(ndev->phydev);
+       phy_start_aneg(ndev->phydev);
+       netif_start_queue(ndev);
+
+       return 0;
+
+out_free_irq:
+       disable_irq(priv->irq);
+       free_irq(priv->irq, ndev);
+
+       return ret;
+}
+
+/* ndo_stop callback: quiesce the datapath and release ring resources.
+ *
+ * Masks and frees the interrupt, stops the Tx queue, PHY and NAPI,
+ * halts the descriptor engine, then unmaps and frees every outstanding
+ * Tx/Rx skb before freeing the software descriptor arrays.
+ */
+static int ave_stop(struct net_device *ndev)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       int entry;
+
+       ave_irq_disable_all(ndev);
+       disable_irq(priv->irq);
+       free_irq(priv->irq, ndev);
+
+       netif_tx_disable(ndev);
+       phy_stop(ndev->phydev);
+       napi_disable(&priv->napi_tx);
+       napi_disable(&priv->napi_rx);
+
+       ave_desc_switch(ndev, AVE_DESC_STOP);
+
+       /* free Tx buffer */
+       for (entry = 0; entry < priv->tx.ndesc; entry++) {
+               if (!priv->tx.desc[entry].skbs)
+                       continue;
+
+               ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
+               dev_kfree_skb_any(priv->tx.desc[entry].skbs);
+               priv->tx.desc[entry].skbs = NULL;
+       }
+       priv->tx.proc_idx = 0;
+       priv->tx.done_idx = 0;
+
+       /* free Rx buffer */
+       for (entry = 0; entry < priv->rx.ndesc; entry++) {
+               if (!priv->rx.desc[entry].skbs)
+                       continue;
+
+               ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
+               dev_kfree_skb_any(priv->rx.desc[entry].skbs);
+               priv->rx.desc[entry].skbs = NULL;
+       }
+       priv->rx.proc_idx = 0;
+       priv->rx.done_idx = 0;
+
+       kfree(priv->tx.desc);
+       kfree(priv->rx.desc);
+
+       return 0;
+}
+
+/* ndo_start_xmit: queue one skb on the Tx descriptor ring.
+ *
+ * Returns NETDEV_TX_BUSY (and stops the queue) when no free descriptor
+ * is available.  Otherwise the skb is consumed: it is padded to ETH_ZLEN,
+ * DMA-mapped, and handed to the hardware; on padding or mapping failure
+ * it is dropped (counted in stats_tx.dropped) and NETDEV_TX_OK is
+ * returned.
+ */
+static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       u32 proc_idx, done_idx, ndesc, cmdsts;
+       int ret, freepkt;
+       dma_addr_t paddr;
+
+       proc_idx = priv->tx.proc_idx;
+       done_idx = priv->tx.done_idx;
+       ndesc = priv->tx.ndesc;
+       /* free slots in the ring; one entry is kept unused to tell full
+        * from empty
+        */
+       freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;
+
+       /* stop queue when not enough entry */
+       if (unlikely(freepkt < 1)) {
+               netif_stop_queue(ndev);
+               return NETDEV_TX_BUSY;
+       }
+
+       /* add padding for short packet */
+       if (skb_put_padto(skb, ETH_ZLEN)) {
+               priv->stats_tx.dropped++;
+               return NETDEV_TX_OK;
+       }
+
+       /* map Tx buffer
+        * Tx buffer set to the Tx descriptor doesn't have any restriction.
+        */
+       ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
+                         skb->data, skb->len, DMA_TO_DEVICE, &paddr);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               priv->stats_tx.dropped++;
+               return NETDEV_TX_OK;
+       }
+
+       priv->tx.desc[proc_idx].skbs = skb;
+
+       ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);
+
+       cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
+               (skb->len & AVE_STS_PKTLEN_TX_MASK);
+
+       /* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
+       if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
+               cmdsts |= AVE_STS_INTR;
+
+       /* disable checksum calculation when skb doesn't calculate checksum */
+       if (skb->ip_summed == CHECKSUM_NONE ||
+           skb->ip_summed == CHECKSUM_UNNECESSARY)
+               cmdsts |= AVE_STS_NOCSUM;
+
+       ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);
+
+       priv->tx.proc_idx = (proc_idx + 1) % ndesc;
+
+       return NETDEV_TX_OK;
+}
+
+/* ndo_do_ioctl: delegate MII ioctls (SIOCGMIIREG etc.) to the PHY layer. */
+static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+       return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+/* Leading octet of the IPv4 (01:00:5e:...) and IPv6 (33:33:...) multicast
+ * MAC prefixes; ave_set_rx_mode() installs them with a match length of 1
+ * byte when "accept all multicast" is in effect.
+ */
+static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+/* ndo_set_rx_mode: program promiscuous/multicast filtering.
+ *
+ * Disables the MAC address filter for promiscuous mode (or when no
+ * multicast addresses are configured), and fills the pfsel multicast
+ * entries either with the catch-all IPv4/IPv6 prefixes or with the
+ * individual addresses from the device's multicast list.
+ */
+static void ave_set_rx_mode(struct net_device *ndev)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       struct netdev_hw_addr *hw_adr;
+       int count, mc_cnt;
+       u32 val;
+
+       /* MAC addr filter is disabled for promiscuous mode or empty mc list */
+       mc_cnt = netdev_mc_count(ndev);
+       val = readl(priv->base + AVE_RXCR);
+       if (ndev->flags & IFF_PROMISC || !mc_cnt)
+               val &= ~AVE_RXCR_AFEN;
+       else
+               val |= AVE_RXCR_AFEN;
+       writel(val, priv->base + AVE_RXCR);
+
+       /* set all multicast address */
+       if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
+               ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
+                                     v4multi_macadr, 1);
+               ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
+                                     v6multi_macadr, 1);
+       } else {
+               /* stop all multicast filter */
+               for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
+                       ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);
+
+               /* set multicast addresses */
+               count = 0;
+               netdev_for_each_mc_addr(hw_adr, ndev) {
+                       if (count == mc_cnt)
+                               break;
+                       ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
+                                             hw_adr->addr, 6);
+                       count++;
+               }
+       }
+}
+
+/* ndo_get_stats64: report accumulated Rx/Tx statistics.
+ *
+ * Packet/byte counters are 64-bit and read under the u64_stats seqcount
+ * retry loop; the remaining counters are copied without synchronization.
+ */
+static void ave_get_stats64(struct net_device *ndev,
+                           struct rtnl_link_stats64 *stats)
+{
+       struct ave_private *priv = netdev_priv(ndev);
+       unsigned int start;
+
+       do {
+               start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
+               stats->rx_packets = priv->stats_rx.packets;
+               stats->rx_bytes   = priv->stats_rx.bytes;
+       } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));
+
+       do {
+               start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
+               stats->tx_packets = priv->stats_tx.packets;
+               stats->tx_bytes   = priv->stats_tx.bytes;
+       } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));
+
+       stats->rx_errors      = priv->stats_rx.errors;
+       stats->tx_errors      = priv->stats_tx.errors;
+       stats->rx_dropped     = priv->stats_rx.dropped;
+       stats->tx_dropped     = priv->stats_tx.dropped;
+       stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
+       stats->collisions     = priv->stats_tx.collisions;
+}
+
+/* ndo_set_mac_address: validate/store the new address, then reprogram
+ * the hardware registers and unicast packet filter to match.
+ */
+static int ave_set_mac_address(struct net_device *ndev, void *p)
+{
+       int ret = eth_mac_addr(ndev, p);
+
+       if (ret)
+               return ret;
+
+       ave_macaddr_init(ndev);
+
+       return 0;
+}
+
+/* net_device_ops for the AVE MAC. */
+static const struct net_device_ops ave_netdev_ops = {
+       .ndo_init               = ave_init,
+       .ndo_uninit             = ave_uninit,
+       .ndo_open               = ave_open,
+       .ndo_stop               = ave_stop,
+       .ndo_start_xmit         = ave_start_xmit,
+       .ndo_do_ioctl           = ave_ioctl,
+       .ndo_set_rx_mode        = ave_set_rx_mode,
+       .ndo_get_stats64        = ave_get_stats64,
+       .ndo_set_mac_address    = ave_set_mac_address,
+};
+
+/* Platform probe: parse DT, map resources and register the net device.
+ *
+ * Validates the phy-mode (RGMII variants, RMII or MII), maps registers,
+ * reads the MAC address from DT (falling back to a random one), sizes
+ * descriptors and the DMA mask per SoC data, acquires clock/reset/MDIO
+ * resources, registers NAPI contexts and finally the netdev.  Hardware
+ * is not touched until ndo_init enables clock/reset, except for the ID
+ * register read at the end (NOTE(review): AVE_IDR is read after
+ * register_netdev but before ndo_open — presumably ndo_init has already
+ * enabled the clock at that point; confirm ordering).
+ */
+static int ave_probe(struct platform_device *pdev)
+{
+       const struct ave_soc_data *data;
+       struct device *dev = &pdev->dev;
+       char buf[ETHTOOL_FWVERS_LEN];
+       phy_interface_t phy_mode;
+       struct ave_private *priv;
+       struct net_device *ndev;
+       struct device_node *np;
+       struct resource *res;
+       const void *mac_addr;
+       void __iomem *base;
+       u64 dma_mask;
+       int irq, ret;
+       u32 ave_id;
+
+       data = of_device_get_match_data(dev);
+       if (WARN_ON(!data))
+               return -EINVAL;
+
+       np = dev->of_node;
+       phy_mode = of_get_phy_mode(np);
+       if (phy_mode < 0) {
+               dev_err(dev, "phy-mode not found\n");
+               return -EINVAL;
+       }
+       if ((!phy_interface_mode_is_rgmii(phy_mode)) &&
+           phy_mode != PHY_INTERFACE_MODE_RMII &&
+           phy_mode != PHY_INTERFACE_MODE_MII) {
+               dev_err(dev, "phy-mode is invalid\n");
+               return -EINVAL;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "IRQ not found\n");
+               return irq;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       ndev = alloc_etherdev(sizeof(struct ave_private));
+       if (!ndev) {
+               dev_err(dev, "can't allocate ethernet device\n");
+               return -ENOMEM;
+       }
+
+       ndev->netdev_ops = &ave_netdev_ops;
+       ndev->ethtool_ops = &ave_ethtool_ops;
+       SET_NETDEV_DEV(ndev, dev);
+
+       ndev->features    |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+       ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+
+       ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
+
+       mac_addr = of_get_mac_address(np);
+       if (mac_addr)
+               ether_addr_copy(ndev->dev_addr, mac_addr);
+
+       /* if the mac address is invalid, use random mac address */
+       if (!is_valid_ether_addr(ndev->dev_addr)) {
+               eth_hw_addr_random(ndev);
+               dev_warn(dev, "Using random MAC address: %pM\n",
+                        ndev->dev_addr);
+       }
+
+       priv = netdev_priv(ndev);
+       priv->base = base;
+       priv->irq = irq;
+       priv->ndev = ndev;
+       priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
+       priv->phy_mode = phy_mode;
+       priv->data = data;
+
+       /* descriptor width and DMA addressing depend on the SoC */
+       if (IS_DESC_64BIT(priv)) {
+               priv->desc_size = AVE_DESC_SIZE_64;
+               priv->tx.daddr  = AVE_TXDM_64;
+               priv->rx.daddr  = AVE_RXDM_64;
+               dma_mask = DMA_BIT_MASK(64);
+       } else {
+               priv->desc_size = AVE_DESC_SIZE_32;
+               priv->tx.daddr  = AVE_TXDM_32;
+               priv->rx.daddr  = AVE_RXDM_32;
+               dma_mask = DMA_BIT_MASK(32);
+       }
+       ret = dma_set_mask(dev, dma_mask);
+       if (ret)
+               goto out_free_netdev;
+
+       priv->tx.ndesc = AVE_NR_TXDESC;
+       priv->rx.ndesc = AVE_NR_RXDESC;
+
+       u64_stats_init(&priv->stats_tx.syncp);
+       u64_stats_init(&priv->stats_rx.syncp);
+
+       priv->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(priv->clk)) {
+               ret = PTR_ERR(priv->clk);
+               goto out_free_netdev;
+       }
+
+       priv->rst = devm_reset_control_get_optional_shared(dev, NULL);
+       if (IS_ERR(priv->rst)) {
+               ret = PTR_ERR(priv->rst);
+               goto out_free_netdev;
+       }
+
+       priv->mdio = devm_mdiobus_alloc(dev);
+       if (!priv->mdio) {
+               ret = -ENOMEM;
+               goto out_free_netdev;
+       }
+       priv->mdio->priv = ndev;
+       priv->mdio->parent = dev;
+       priv->mdio->read = ave_mdiobus_read;
+       priv->mdio->write = ave_mdiobus_write;
+       priv->mdio->name = "uniphier-mdio";
+       snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
+                pdev->name, pdev->id);
+
+       /* Register as a NAPI supported driver */
+       netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
+       netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
+                         priv->tx.ndesc);
+
+       platform_set_drvdata(pdev, ndev);
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               dev_err(dev, "failed to register netdevice\n");
+               goto out_del_napi;
+       }
+
+       /* get ID and version */
+       ave_id = readl(priv->base + AVE_IDR);
+       ave_hw_read_version(ndev, buf, sizeof(buf));
+
+       dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
+                (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
+                (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
+                buf, priv->irq, phy_modes(phy_mode));
+
+       return 0;
+
+out_del_napi:
+       netif_napi_del(&priv->napi_rx);
+       netif_napi_del(&priv->napi_tx);
+out_free_netdev:
+       free_netdev(ndev);
+
+       return ret;
+}
+
+/* Platform remove: unwind ave_probe() — unregister the netdev, delete
+ * the NAPI contexts and free the net device.
+ */
+static int ave_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct ave_private *priv = netdev_priv(ndev);
+
+       unregister_netdev(ndev);
+       netif_napi_del(&priv->napi_rx);
+       netif_napi_del(&priv->napi_tx);
+       free_netdev(ndev);
+
+       return 0;
+}
+
+/* Per-SoC configuration: of the supported SoCs only LD20 uses 64-bit
+ * descriptors (see IS_DESC_64BIT handling in ave_probe()).
+ */
+static const struct ave_soc_data ave_pro4_data = {
+       .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_pxs2_data = {
+       .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld11_data = {
+       .is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld20_data = {
+       .is_desc_64bit = true,
+};
+
+/* DT match table; .data selects the per-SoC ave_soc_data above. */
+static const struct of_device_id of_ave_match[] = {
+       {
+               .compatible = "socionext,uniphier-pro4-ave4",
+               .data = &ave_pro4_data,
+       },
+       {
+               .compatible = "socionext,uniphier-pxs2-ave4",
+               .data = &ave_pxs2_data,
+       },
+       {
+               .compatible = "socionext,uniphier-ld11-ave4",
+               .data = &ave_ld11_data,
+       },
+       {
+               .compatible = "socionext,uniphier-ld20-ave4",
+               .data = &ave_ld20_data,
+       },
+       { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_ave_match);
+
+/* Platform driver registration and module metadata. */
+static struct platform_driver ave_driver = {
+       .probe  = ave_probe,
+       .remove = ave_remove,
+       .driver = {
+               .name = "ave",
+               .of_match_table = of_ave_match,
+       },
+};
+module_platform_driver(ave_driver);
+
+MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
+MODULE_LICENSE("GPL v2");
index e1e5ac0..ce2ea2d 100644 (file)
@@ -409,7 +409,7 @@ struct stmmac_desc_ops {
        /* get timestamp value */
         u64(*get_timestamp) (void *desc, u32 ats);
        /* get rx timestamp status */
-       int (*get_rx_timestamp_status) (void *desc, u32 ats);
+       int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
        /* Display ring */
        void (*display_ring)(void *head, unsigned int size, bool rx);
        /* set MSS via context descriptor */
index 4b286e2..2fd8456 100644 (file)
@@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc)
        return ret;
 }
 
-static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
+                                                u32 ats)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
        int ret = -EINVAL;
@@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
 
                        /* Check if timestamp is OK from context descriptor */
                        do {
-                               ret = dwmac4_rx_check_timestamp(desc);
+                               ret = dwmac4_rx_check_timestamp(next_desc);
                                if (ret < 0)
                                        goto exit;
                                i++;
@@ -405,7 +406,7 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
        pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
 
        for (i = 0; i < size; i++) {
-               pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+               pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
                        i, (unsigned int)virt_to_phys(p),
                        le32_to_cpu(p->des0), le32_to_cpu(p->des1),
                        le32_to_cpu(p->des2), le32_to_cpu(p->des3));
index 7546b36..b47cb5c 100644 (file)
@@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
        return ns;
 }
 
-static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
+static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
+                                           u32 ats)
 {
        if (ats) {
                struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
@@ -427,7 +428,7 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
                u64 x;
 
                x = *(u64 *)ep;
-               pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+               pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
                        i, (unsigned int)virt_to_phys(ep),
                        (unsigned int)x, (unsigned int)(x >> 32),
                        ep->basic.des2, ep->basic.des3);
index f817f8f..ebd9e5e 100644 (file)
@@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
        return ns;
 }
 
-static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
+static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
 
@@ -288,7 +288,7 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx)
                u64 x;
 
                x = *(u64 *)p;
-               pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+               pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
                        i, (unsigned int)virt_to_phys(p),
                        (unsigned int)x, (unsigned int)(x >> 32),
                        p->des2, p->des3);
index 721b616..08c19eb 100644 (file)
@@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
 {
        u32 value = readl(ioaddr + PTP_TCR);
        unsigned long data;
+       u32 reg_value;
 
        /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
         *      formula = (1/ptp_clock) * 1000000000
@@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
 
        data &= PTP_SSIR_SSINC_MASK;
 
+       reg_value = data;
        if (gmac4)
-               data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+               reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
 
-       writel(data, ioaddr + PTP_SSIR);
+       writel(reg_value, ioaddr + PTP_SSIR);
 
        return data;
 }
index c52a996..d9c98fd 100644 (file)
@@ -482,7 +482,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                desc = np;
 
        /* Check if timestamp is available */
-       if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+       if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
                ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
@@ -3436,9 +3436,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        if (netif_msg_rx_status(priv)) {
                                netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
                                           p, entry, des);
-                               if (frame_len > ETH_FRAME_LEN)
-                                       netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
-                                                  frame_len, status);
+                               netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
+                                          frame_len, status);
                        }
 
                        /* The zero-copy is always used for all the sizes
index d655a42..eb1c6b0 100644 (file)
@@ -333,9 +333,8 @@ void xlgmac_print_pkt(struct net_device *netdev,
                      struct sk_buff *skb, bool tx_rx)
 {
        struct ethhdr *eth = (struct ethhdr *)skb->data;
-       unsigned char *buf = skb->data;
        unsigned char buffer[128];
-       unsigned int i, j;
+       unsigned int i;
 
        netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 
@@ -346,22 +345,13 @@ void xlgmac_print_pkt(struct net_device *netdev,
        netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
        netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
 
-       for (i = 0, j = 0; i < skb->len;) {
-               j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
-                             buf[i++]);
-
-               if ((i % 32) == 0) {
-                       netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
-                       j = 0;
-               } else if ((i % 16) == 0) {
-                       buffer[j++] = ' ';
-                       buffer[j++] = ' ';
-               } else if ((i % 4) == 0) {
-                       buffer[j++] = ' ';
-               }
+       for (i = 0; i < skb->len; i += 32) {
+               unsigned int len = min(skb->len - i, 32U);
+
+               hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+                                  buffer, sizeof(buffer), false);
+               netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
        }
-       if (i % 32)
-               netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
 
        netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 }
index 078d2c3..5134d5c 100644 (file)
@@ -66,7 +66,7 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
        return 0;
 }
 
-static const struct bpf_ext_analyzer_ops nsim_bpf_analyzer_ops = {
+static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
        .insn_hook = nsim_bpf_verify_insn,
 };
 
@@ -107,7 +107,7 @@ int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
        struct tc_cls_bpf_offload *cls_bpf = type_data;
        struct bpf_prog *prog = cls_bpf->prog;
        struct netdevsim *ns = cb_priv;
-       bool skip_sw;
+       struct bpf_prog *oldprog;
 
        if (type != TC_SETUP_CLSBPF ||
            !tc_can_offload(ns->netdev) ||
@@ -115,27 +115,27 @@ int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
            cls_bpf->common.chain_index)
                return -EOPNOTSUPP;
 
-       skip_sw = cls_bpf->gen_flags & TCA_CLS_FLAGS_SKIP_SW;
-
-       if (nsim_xdp_offload_active(ns))
-               return -EBUSY;
-
        if (!ns->bpf_tc_accept)
                return -EOPNOTSUPP;
        /* Note: progs without skip_sw will probably not be dev bound */
        if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept)
                return -EOPNOTSUPP;
 
-       switch (cls_bpf->command) {
-       case TC_CLSBPF_REPLACE:
-               return nsim_bpf_offload(ns, prog, true);
-       case TC_CLSBPF_ADD:
-               return nsim_bpf_offload(ns, prog, false);
-       case TC_CLSBPF_DESTROY:
-               return nsim_bpf_offload(ns, NULL, true);
-       default:
+       if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
                return -EOPNOTSUPP;
+
+       oldprog = cls_bpf->oldprog;
+
+       /* Don't remove if oldprog doesn't match driver's state */
+       if (ns->bpf_offloaded != oldprog) {
+               oldprog = NULL;
+               if (!cls_bpf->prog)
+                       return 0;
+               if (ns->bpf_offloaded)
+                       return -EBUSY;
        }
+
+       return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
 }
 
 int nsim_bpf_disable_tc(struct netdevsim *ns)
@@ -201,7 +201,6 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
 {
        struct nsim_bpf_bound_prog *state;
        char name[16];
-       int err;
 
        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
@@ -214,10 +213,9 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
        /* Program id is not populated yet when we create the state. */
        sprintf(name, "%u", ns->prog_id_gen++);
        state->ddir = debugfs_create_dir(name, ns->ddir_bpf_bound_progs);
-       if (IS_ERR(state->ddir)) {
-               err = PTR_ERR(state->ddir);
+       if (IS_ERR_OR_NULL(state->ddir)) {
                kfree(state);
-               return err;
+               return -ENOMEM;
        }
 
        debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
@@ -349,6 +347,8 @@ int nsim_bpf_init(struct netdevsim *ns)
                           &ns->bpf_bind_verifier_delay);
        ns->ddir_bpf_bound_progs =
                debugfs_create_dir("bpf_bound_progs", ns->ddir);
+       if (IS_ERR_OR_NULL(ns->ddir_bpf_bound_progs))
+               return -ENOMEM;
 
        ns->bpf_tc_accept = true;
        debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
index eb8c679..3fd5679 100644 (file)
@@ -139,7 +139,7 @@ static void nsim_dev_release(struct device *dev)
        free_netdev(ns->netdev);
 }
 
-struct device_type nsim_dev_type = {
+static struct device_type nsim_dev_type = {
        .groups = nsim_dev_attr_groups,
        .release = nsim_dev_release,
 };
@@ -151,6 +151,8 @@ static int nsim_init(struct net_device *dev)
 
        ns->netdev = dev;
        ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir);
+       if (IS_ERR_OR_NULL(ns->ddir))
+               return -ENOMEM;
 
        err = nsim_bpf_init(ns);
        if (err)
@@ -469,8 +471,8 @@ static int __init nsim_module_init(void)
        int err;
 
        nsim_ddir = debugfs_create_dir(DRV_NAME, NULL);
-       if (IS_ERR(nsim_ddir))
-               return PTR_ERR(nsim_ddir);
+       if (IS_ERR_OR_NULL(nsim_ddir))
+               return -ENOMEM;
 
        err = bus_register(&nsim_bus);
        if (err)
index 1e190f3..c271590 100644 (file)
@@ -215,34 +215,22 @@ static int at803x_suspend(struct phy_device *phydev)
        int value;
        int wol_enabled;
 
-       mutex_lock(&phydev->lock);
-
        value = phy_read(phydev, AT803X_INTR_ENABLE);
        wol_enabled = value & AT803X_INTR_ENABLE_WOL;
 
-       value = phy_read(phydev, MII_BMCR);
-
        if (wol_enabled)
-               value |= BMCR_ISOLATE;
+               value = BMCR_ISOLATE;
        else
-               value |= BMCR_PDOWN;
+               value = BMCR_PDOWN;
 
-       phy_write(phydev, MII_BMCR, value);
-
-       mutex_unlock(&phydev->lock);
+       phy_modify(phydev, MII_BMCR, 0, value);
 
        return 0;
 }
 
 static int at803x_resume(struct phy_device *phydev)
 {
-       int value;
-
-       value = phy_read(phydev, MII_BMCR);
-       value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
-       phy_write(phydev, MII_BMCR, value);
-
-       return 0;
+       return phy_modify(phydev, MII_BMCR, ~(BMCR_PDOWN | BMCR_ISOLATE), 0);
 }
 
 static int at803x_probe(struct phy_device *phydev)
index eb51672..001fe1d 100644 (file)
@@ -115,37 +115,6 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
 }
 EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
 
-int fixed_phy_update_state(struct phy_device *phydev,
-                          const struct fixed_phy_status *status,
-                          const struct fixed_phy_status *changed)
-{
-       struct fixed_mdio_bus *fmb = &platform_fmb;
-       struct fixed_phy *fp;
-
-       if (!phydev || phydev->mdio.bus != fmb->mii_bus)
-               return -EINVAL;
-
-       list_for_each_entry(fp, &fmb->phys, node) {
-               if (fp->addr == phydev->mdio.addr) {
-                       write_seqcount_begin(&fp->seqcount);
-#define _UPD(x) if (changed->x) \
-       fp->status.x = status->x
-                       _UPD(link);
-                       _UPD(speed);
-                       _UPD(duplex);
-                       _UPD(pause);
-                       _UPD(asym_pause);
-#undef _UPD
-                       fixed_phy_update(fp);
-                       write_seqcount_end(&fp->seqcount);
-                       return 0;
-               }
-       }
-
-       return -ENOENT;
-}
-EXPORT_SYMBOL(fixed_phy_update_state);
-
 int fixed_phy_add(unsigned int irq, int phy_addr,
                  struct fixed_phy_status *status,
                  int link_gpio)
index 2fc026d..2bd3896 100644 (file)
@@ -83,7 +83,7 @@
 #define MII_88E1121_PHY_MSCR_REG       21
 #define MII_88E1121_PHY_MSCR_RX_DELAY  BIT(5)
 #define MII_88E1121_PHY_MSCR_TX_DELAY  BIT(4)
-#define MII_88E1121_PHY_MSCR_DELAY_MASK        (~(BIT(5) | BIT(4)))
+#define MII_88E1121_PHY_MSCR_DELAY_MASK        (BIT(5) | BIT(4))
 
 #define MII_88E1121_MISC_TEST                          0x1a
 #define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK      0x1f00
@@ -177,27 +177,19 @@ struct marvell_priv {
        struct device *hwmon_dev;
 };
 
-static int marvell_get_page(struct phy_device *phydev)
+static int marvell_read_page(struct phy_device *phydev)
 {
-       return phy_read(phydev, MII_MARVELL_PHY_PAGE);
+       return __phy_read(phydev, MII_MARVELL_PHY_PAGE);
 }
 
-static int marvell_set_page(struct phy_device *phydev, int page)
+static int marvell_write_page(struct phy_device *phydev, int page)
 {
-       return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
+       return __phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
 }
 
-static int marvell_get_set_page(struct phy_device *phydev, int page)
+static int marvell_set_page(struct phy_device *phydev, int page)
 {
-       int oldpage = marvell_get_page(phydev);
-
-       if (oldpage < 0)
-               return oldpage;
-
-       if (page != oldpage)
-               return marvell_set_page(phydev, page);
-
-       return 0;
+       return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
 }
 
 static int marvell_ack_interrupt(struct phy_device *phydev)
@@ -399,7 +391,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev)
 static int marvell_of_reg_init(struct phy_device *phydev)
 {
        const __be32 *paddr;
-       int len, i, saved_page, current_page, ret;
+       int len, i, saved_page, current_page, ret = 0;
 
        if (!phydev->mdio.dev.of_node)
                return 0;
@@ -409,12 +401,11 @@ static int marvell_of_reg_init(struct phy_device *phydev)
        if (!paddr || len < (4 * sizeof(*paddr)))
                return 0;
 
-       saved_page = marvell_get_page(phydev);
+       saved_page = phy_save_page(phydev);
        if (saved_page < 0)
-               return saved_page;
+               goto err;
        current_page = saved_page;
 
-       ret = 0;
        len /= sizeof(*paddr);
        for (i = 0; i < len - 3; i += 4) {
                u16 page = be32_to_cpup(paddr + i);
@@ -425,14 +416,14 @@ static int marvell_of_reg_init(struct phy_device *phydev)
 
                if (page != current_page) {
                        current_page = page;
-                       ret = marvell_set_page(phydev, page);
+                       ret = marvell_write_page(phydev, page);
                        if (ret < 0)
                                goto err;
                }
 
                val = 0;
                if (mask) {
-                       val = phy_read(phydev, reg);
+                       val = __phy_read(phydev, reg);
                        if (val < 0) {
                                ret = val;
                                goto err;
@@ -441,17 +432,12 @@ static int marvell_of_reg_init(struct phy_device *phydev)
                }
                val |= val_bits;
 
-               ret = phy_write(phydev, reg, val);
+               ret = __phy_write(phydev, reg, val);
                if (ret < 0)
                        goto err;
        }
 err:
-       if (current_page != saved_page) {
-               i = marvell_set_page(phydev, saved_page);
-               if (ret == 0)
-                       ret = i;
-       }
-       return ret;
+       return phy_restore_page(phydev, saved_page, ret);
 }
 #else
 static int marvell_of_reg_init(struct phy_device *phydev)
@@ -462,34 +448,21 @@ static int marvell_of_reg_init(struct phy_device *phydev)
 
 static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
 {
-       int err, oldpage, mscr;
-
-       oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
-       if (oldpage < 0)
-               return oldpage;
-
-       mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG);
-       if (mscr < 0) {
-               err = mscr;
-               goto out;
-       }
-
-       mscr &= MII_88E1121_PHY_MSCR_DELAY_MASK;
+       int mscr;
 
        if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
-               mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
-                        MII_88E1121_PHY_MSCR_TX_DELAY);
+               mscr = MII_88E1121_PHY_MSCR_RX_DELAY |
+                      MII_88E1121_PHY_MSCR_TX_DELAY;
        else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
-               mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
+               mscr = MII_88E1121_PHY_MSCR_RX_DELAY;
        else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
-               mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
-
-       err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
-
-out:
-       marvell_set_page(phydev, oldpage);
+               mscr = MII_88E1121_PHY_MSCR_TX_DELAY;
+       else
+               mscr = 0;
 
-       return err;
+       return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+                               MII_88E1121_PHY_MSCR_REG,
+                               MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
 }
 
 static int m88e1121_config_aneg(struct phy_device *phydev)
@@ -498,7 +471,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
 
        if (phy_interface_is_rgmii(phydev)) {
                err = m88e1121_config_aneg_rgmii_delays(phydev);
-               if (err)
+               if (err < 0)
                        return err;
        }
 
@@ -515,20 +488,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
 
 static int m88e1318_config_aneg(struct phy_device *phydev)
 {
-       int err, oldpage, mscr;
-
-       oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
-       if (oldpage < 0)
-               return oldpage;
-
-       mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
-       mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
-
-       err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr);
-       if (err < 0)
-               return err;
+       int err;
 
-       err = marvell_set_page(phydev, oldpage);
+       err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+                              MII_88E1318S_PHY_MSCR1_REG,
+                              0, MII_88E1318S_PHY_MSCR1_PAD_ODD);
        if (err < 0)
                return err;
 
@@ -700,19 +664,14 @@ static int m88e1116r_config_init(struct phy_device *phydev)
 
 static int m88e3016_config_init(struct phy_device *phydev)
 {
-       int reg;
+       int ret;
 
        /* Enable Scrambler and Auto-Crossover */
-       reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL);
-       if (reg < 0)
-               return reg;
-
-       reg &= ~MII_88E3016_DISABLE_SCRAMBLER;
-       reg |= MII_88E3016_AUTO_MDIX_CROSSOVER;
-
-       reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg);
-       if (reg < 0)
-               return reg;
+       ret = phy_modify(phydev, MII_88E3016_PHY_SPEC_CTRL,
+                        ~MII_88E3016_DISABLE_SCRAMBLER,
+                        MII_88E3016_AUTO_MDIX_CROSSOVER);
+       if (ret < 0)
+               return ret;
 
        return marvell_config_init(phydev);
 }
@@ -721,42 +680,34 @@ static int m88e1111_config_init_hwcfg_mode(struct phy_device *phydev,
                                           u16 mode,
                                           int fibre_copper_auto)
 {
-       int temp;
-
-       temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
-       if (temp < 0)
-               return temp;
-
-       temp &= ~(MII_M1111_HWCFG_MODE_MASK |
-                 MII_M1111_HWCFG_FIBER_COPPER_AUTO |
-                 MII_M1111_HWCFG_FIBER_COPPER_RES);
-       temp |= mode;
-
        if (fibre_copper_auto)
-               temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+               mode |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
 
-       return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+       return phy_modify(phydev, MII_M1111_PHY_EXT_SR,
+                         (u16)~(MII_M1111_HWCFG_MODE_MASK |
+                                MII_M1111_HWCFG_FIBER_COPPER_AUTO |
+                                MII_M1111_HWCFG_FIBER_COPPER_RES),
+                         mode);
 }
 
 static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev)
 {
-       int temp;
-
-       temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
-       if (temp < 0)
-               return temp;
+       int delay;
 
        if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
-               temp |= (MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY);
+               delay = MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY;
        } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
-               temp &= ~MII_M1111_RGMII_TX_DELAY;
-               temp |= MII_M1111_RGMII_RX_DELAY;
+               delay = MII_M1111_RGMII_RX_DELAY;
        } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
-               temp &= ~MII_M1111_RGMII_RX_DELAY;
-               temp |= MII_M1111_RGMII_TX_DELAY;
+               delay = MII_M1111_RGMII_TX_DELAY;
+       } else {
+               delay = 0;
        }
 
-       return phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+       return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+                         (u16)~(MII_M1111_RGMII_RX_DELAY |
+                                MII_M1111_RGMII_TX_DELAY),
+                         delay);
 }
 
 static int m88e1111_config_init_rgmii(struct phy_device *phydev)
@@ -802,7 +753,7 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev)
        int err;
 
        err = m88e1111_config_init_rgmii_delays(phydev);
-       if (err)
+       if (err < 0)
                return err;
 
        err = m88e1111_config_init_hwcfg_mode(
@@ -829,7 +780,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
 
        if (phy_interface_is_rgmii(phydev)) {
                err = m88e1111_config_init_rgmii(phydev);
-               if (err)
+               if (err < 0)
                        return err;
        }
 
@@ -854,20 +805,15 @@ static int m88e1111_config_init(struct phy_device *phydev)
 
 static int m88e1121_config_init(struct phy_device *phydev)
 {
-       int err, oldpage;
-
-       oldpage = marvell_get_set_page(phydev, MII_MARVELL_LED_PAGE);
-       if (oldpage < 0)
-               return oldpage;
+       int err;
 
        /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
-       err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
-                       MII_88E1121_PHY_LED_DEF);
+       err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
+                             MII_88E1121_PHY_LED_CTRL,
+                             MII_88E1121_PHY_LED_DEF);
        if (err < 0)
                return err;
 
-       marvell_set_page(phydev, oldpage);
-
        /* Set marvell,reg-init configuration from device tree */
        return marvell_config_init(phydev);
 }
@@ -875,26 +821,26 @@ static int m88e1121_config_init(struct phy_device *phydev)
 static int m88e1510_config_init(struct phy_device *phydev)
 {
        int err;
-       int temp;
 
        /* SGMII-to-Copper mode initialization */
        if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+               u32 pause;
+
                /* Select page 18 */
                err = marvell_set_page(phydev, 18);
                if (err < 0)
                        return err;
 
                /* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */
-               temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1);
-               temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK;
-               temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII;
-               err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+               err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1,
+                                ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK,
+                                MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII);
                if (err < 0)
                        return err;
 
                /* PHY reset is necessary after changing MODE[2:0] */
-               temp |= MII_88E1510_GEN_CTRL_REG_1_RESET;
-               err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+               err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, 0,
+                                MII_88E1510_GEN_CTRL_REG_1_RESET);
                if (err < 0)
                        return err;
 
@@ -902,6 +848,16 @@ static int m88e1510_config_init(struct phy_device *phydev)
                err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
                if (err < 0)
                        return err;
+
+               /* There appears to be a bug in the 88e1512 when used in
+                * SGMII to copper mode, where the AN advertisment register
+                * clears the pause bits each time a negotiation occurs.
+                * This means we can never be truely sure what was advertised,
+                * so disable Pause support.
+                */
+               pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+               phydev->supported &= ~pause;
+               phydev->advertising &= ~pause;
        }
 
        return m88e1121_config_init(phydev);
@@ -990,7 +946,6 @@ static int m88e1149_config_init(struct phy_device *phydev)
 
 static int m88e1145_config_init_rgmii(struct phy_device *phydev)
 {
-       int temp;
        int err;
 
        err = m88e1111_config_init_rgmii_delays(phydev);
@@ -1002,15 +957,9 @@ static int m88e1145_config_init_rgmii(struct phy_device *phydev)
                if (err < 0)
                        return err;
 
-               temp = phy_read(phydev, 0x1e);
-               if (temp < 0)
-                       return temp;
-
-               temp &= 0xf03f;
-               temp |= 2 << 9; /* 36 ohm */
-               temp |= 2 << 6; /* 39 ohm */
-
-               err = phy_write(phydev, 0x1e, temp);
+               err = phy_modify(phydev, 0x1e, 0xf03f,
+                                2 << 9 | /* 36 ohm */
+                                2 << 6); /* 39 ohm */
                if (err < 0)
                        return err;
 
@@ -1386,100 +1335,98 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
 static void m88e1318_get_wol(struct phy_device *phydev,
                             struct ethtool_wolinfo *wol)
 {
+       int oldpage, ret = 0;
+
        wol->supported = WAKE_MAGIC;
        wol->wolopts = 0;
 
-       if (marvell_set_page(phydev, MII_MARVELL_WOL_PAGE) < 0)
-               return;
+       oldpage = phy_select_page(phydev, MII_MARVELL_WOL_PAGE);
+       if (oldpage < 0)
+               goto error;
 
-       if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) &
-           MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
+       ret = __phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
+       if (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
                wol->wolopts |= WAKE_MAGIC;
 
-       if (marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE) < 0)
-               return;
+error:
+       phy_restore_page(phydev, oldpage, ret);
 }
 
 static int m88e1318_set_wol(struct phy_device *phydev,
                            struct ethtool_wolinfo *wol)
 {
-       int err, oldpage, temp;
+       int err = 0, oldpage;
 
-       oldpage = marvell_get_page(phydev);
+       oldpage = phy_save_page(phydev);
+       if (oldpage < 0)
+               goto error;
 
        if (wol->wolopts & WAKE_MAGIC) {
                /* Explicitly switch to page 0x00, just to be sure */
-               err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+               err = marvell_write_page(phydev, MII_MARVELL_COPPER_PAGE);
                if (err < 0)
-                       return err;
+                       goto error;
 
                /* Enable the WOL interrupt */
-               temp = phy_read(phydev, MII_88E1318S_PHY_CSIER);
-               temp |= MII_88E1318S_PHY_CSIER_WOL_EIE;
-               err = phy_write(phydev, MII_88E1318S_PHY_CSIER, temp);
+               err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
+                                  MII_88E1318S_PHY_CSIER_WOL_EIE);
                if (err < 0)
-                       return err;
+                       goto error;
 
-               err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE);
+               err = marvell_write_page(phydev, MII_MARVELL_LED_PAGE);
                if (err < 0)
-                       return err;
+                       goto error;
 
                /* Setup LED[2] as interrupt pin (active low) */
-               temp = phy_read(phydev, MII_88E1318S_PHY_LED_TCR);
-               temp &= ~MII_88E1318S_PHY_LED_TCR_FORCE_INT;
-               temp |= MII_88E1318S_PHY_LED_TCR_INTn_ENABLE;
-               temp |= MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW;
-               err = phy_write(phydev, MII_88E1318S_PHY_LED_TCR, temp);
+               err = __phy_modify(phydev, MII_88E1318S_PHY_LED_TCR,
+                                  (u16)~MII_88E1318S_PHY_LED_TCR_FORCE_INT,
+                                  MII_88E1318S_PHY_LED_TCR_INTn_ENABLE |
+                                  MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW);
                if (err < 0)
-                       return err;
+                       goto error;
 
-               err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
+               err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
                if (err < 0)
-                       return err;
+                       goto error;
 
                /* Store the device address for the magic packet */
-               err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
+               err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
                                ((phydev->attached_dev->dev_addr[5] << 8) |
                                 phydev->attached_dev->dev_addr[4]));
                if (err < 0)
-                       return err;
-               err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
+                       goto error;
+               err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
                                ((phydev->attached_dev->dev_addr[3] << 8) |
                                 phydev->attached_dev->dev_addr[2]));
                if (err < 0)
-                       return err;
-               err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
+                       goto error;
+               err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
                                ((phydev->attached_dev->dev_addr[1] << 8) |
                                 phydev->attached_dev->dev_addr[0]));
                if (err < 0)
-                       return err;
+                       goto error;
 
                /* Clear WOL status and enable magic packet matching */
-               temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
-               temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
-               temp |= MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
-               err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+               err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, 0,
+                                  MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS |
+                                  MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE);
                if (err < 0)
-                       return err;
+                       goto error;
        } else {
-               err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
+               err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
                if (err < 0)
-                       return err;
+                       goto error;
 
                /* Clear WOL status and disable magic packet matching */
-               temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
-               temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
-               temp &= ~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
-               err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+               err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL,
+                                  (u16)~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE,
+                                  MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS);
                if (err < 0)
-                       return err;
+                       goto error;
        }
 
-       err = marvell_set_page(phydev, oldpage);
-       if (err < 0)
-               return err;
-
-       return 0;
+error:
+       return phy_restore_page(phydev, oldpage, err);
 }
 
 static int marvell_get_sset_count(struct phy_device *phydev)
@@ -1507,14 +1454,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
 {
        struct marvell_hw_stat stat = marvell_hw_stats[i];
        struct marvell_priv *priv = phydev->priv;
-       int oldpage, val;
+       int val;
        u64 ret;
 
-       oldpage = marvell_get_set_page(phydev, stat.page);
-       if (oldpage < 0)
-               return UINT64_MAX;
-
-       val = phy_read(phydev, stat.reg);
+       val = phy_read_paged(phydev, stat.page, stat.reg);
        if (val < 0) {
                ret = UINT64_MAX;
        } else {
@@ -1523,8 +1466,6 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
                ret = priv->stats[i];
        }
 
-       marvell_set_page(phydev, oldpage);
-
        return ret;
 }
 
@@ -1541,51 +1482,44 @@ static void marvell_get_stats(struct phy_device *phydev,
 static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
 {
        int oldpage;
-       int ret;
+       int ret = 0;
        int val;
 
        *temp = 0;
 
-       mutex_lock(&phydev->lock);
-
-       oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-       if (oldpage < 0) {
-               mutex_unlock(&phydev->lock);
-               return oldpage;
-       }
+       oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+       if (oldpage < 0)
+               goto error;
 
        /* Enable temperature sensor */
-       ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+       ret = __phy_read(phydev, MII_88E1121_MISC_TEST);
        if (ret < 0)
                goto error;
 
-       ret = phy_write(phydev, MII_88E1121_MISC_TEST,
-                       ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+       ret = __phy_write(phydev, MII_88E1121_MISC_TEST,
+                         ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
        if (ret < 0)
                goto error;
 
        /* Wait for temperature to stabilize */
        usleep_range(10000, 12000);
 
-       val = phy_read(phydev, MII_88E1121_MISC_TEST);
+       val = __phy_read(phydev, MII_88E1121_MISC_TEST);
        if (val < 0) {
                ret = val;
                goto error;
        }
 
        /* Disable temperature sensor */
-       ret = phy_write(phydev, MII_88E1121_MISC_TEST,
-                       ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+       ret = __phy_write(phydev, MII_88E1121_MISC_TEST,
+                         ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
        if (ret < 0)
                goto error;
 
        *temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000;
 
 error:
-       marvell_set_page(phydev, oldpage);
-       mutex_unlock(&phydev->lock);
-
-       return ret;
+       return phy_restore_page(phydev, oldpage, ret);
 }
 
 static int m88e1121_hwmon_read(struct device *dev,
@@ -1659,118 +1593,64 @@ static const struct hwmon_chip_info m88e1121_hwmon_chip_info = {
 
 static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
 {
-       int oldpage;
        int ret;
 
        *temp = 0;
 
-       mutex_lock(&phydev->lock);
-
-       oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-       if (oldpage < 0) {
-               mutex_unlock(&phydev->lock);
-               return oldpage;
-       }
-
-       ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR);
+       ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+                            MII_88E1510_TEMP_SENSOR);
        if (ret < 0)
-               goto error;
+               return ret;
 
        *temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000;
 
-error:
-       marvell_set_page(phydev, oldpage);
-       mutex_unlock(&phydev->lock);
-
-       return ret;
+       return 0;
 }
 
 static int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
 {
-       int oldpage;
        int ret;
 
        *temp = 0;
 
-       mutex_lock(&phydev->lock);
-
-       oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-       if (oldpage < 0) {
-               mutex_unlock(&phydev->lock);
-               return oldpage;
-       }
-
-       ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+       ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+                            MII_88E1121_MISC_TEST);
        if (ret < 0)
-               goto error;
+               return ret;
 
        *temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >>
                  MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25;
        /* convert to mC */
        *temp *= 1000;
 
-error:
-       marvell_set_page(phydev, oldpage);
-       mutex_unlock(&phydev->lock);
-
-       return ret;
+       return 0;
 }
 
 static int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
 {
-       int oldpage;
-       int ret;
-
-       mutex_lock(&phydev->lock);
-
-       oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-       if (oldpage < 0) {
-               mutex_unlock(&phydev->lock);
-               return oldpage;
-       }
-
-       ret = phy_read(phydev, MII_88E1121_MISC_TEST);
-       if (ret < 0)
-               goto error;
-
        temp = temp / 1000;
        temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
-       ret = phy_write(phydev, MII_88E1121_MISC_TEST,
-                       (ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) |
-                       (temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT));
-
-error:
-       marvell_set_page(phydev, oldpage);
-       mutex_unlock(&phydev->lock);
 
-       return ret;
+       return phy_modify_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+                               MII_88E1121_MISC_TEST,
+                               MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK,
+                               temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT);
 }
 
 static int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
 {
-       int oldpage;
        int ret;
 
        *alarm = false;
 
-       mutex_lock(&phydev->lock);
-
-       oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-       if (oldpage < 0) {
-               mutex_unlock(&phydev->lock);
-               return oldpage;
-       }
-
-       ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+       ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+                            MII_88E1121_MISC_TEST);
        if (ret < 0)
-               goto error;
-       *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
+               return ret;
 
-error:
-       marvell_set_page(phydev, oldpage);
-       mutex_unlock(&phydev->lock);
+       *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
 
-       return ret;
+       return 0;
 }
 
 static int m88e1510_hwmon_read(struct device *dev,
@@ -1966,6 +1846,8 @@ static struct phy_driver marvell_drivers[] = {
                .config_intr = &marvell_config_intr,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -1983,6 +1865,8 @@ static struct phy_driver marvell_drivers[] = {
                .config_intr = &marvell_config_intr,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2001,6 +1885,8 @@ static struct phy_driver marvell_drivers[] = {
                .config_intr = &marvell_config_intr,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2018,6 +1904,8 @@ static struct phy_driver marvell_drivers[] = {
                .config_intr = &marvell_config_intr,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2037,6 +1925,8 @@ static struct phy_driver marvell_drivers[] = {
                .did_interrupt = &m88e1121_did_interrupt,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2058,6 +1948,8 @@ static struct phy_driver marvell_drivers[] = {
                .set_wol = &m88e1318_set_wol,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2070,11 +1962,14 @@ static struct phy_driver marvell_drivers[] = {
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
                .config_init = &m88e1145_config_init,
-               .config_aneg = &marvell_config_aneg,
+               .config_aneg = &m88e1101_config_aneg,
+               .read_status = &genphy_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
                .config_intr = &marvell_config_intr,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2092,6 +1987,8 @@ static struct phy_driver marvell_drivers[] = {
                .config_intr = &marvell_config_intr,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2109,6 +2006,8 @@ static struct phy_driver marvell_drivers[] = {
                .config_intr = &marvell_config_intr,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2125,6 +2024,8 @@ static struct phy_driver marvell_drivers[] = {
                .config_intr = &marvell_config_intr,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2146,6 +2047,8 @@ static struct phy_driver marvell_drivers[] = {
                .set_wol = &m88e1318_set_wol,
                .resume = &marvell_resume,
                .suspend = &marvell_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2166,6 +2069,8 @@ static struct phy_driver marvell_drivers[] = {
                .did_interrupt = &m88e1121_did_interrupt,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2185,6 +2090,8 @@ static struct phy_driver marvell_drivers[] = {
                .did_interrupt = &m88e1121_did_interrupt,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2204,6 +2111,8 @@ static struct phy_driver marvell_drivers[] = {
                .did_interrupt = &m88e1121_did_interrupt,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
@@ -2223,6 +2132,8 @@ static struct phy_driver marvell_drivers[] = {
                .did_interrupt = &m88e1121_did_interrupt,
                .resume = &genphy_resume,
                .suspend = &genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
index f0cfba4..8a0bd98 100644 (file)
@@ -6,12 +6,18 @@
  *
  * There appears to be several different data paths through the PHY which
  * are automatically managed by the PHY.  The following has been determined
- * via observation and experimentation:
+ * via observation and experimentation for a setup using single-lane Serdes:
  *
  *       SGMII PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for <= 1G)
  *  10GBASE-KR PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for 10G)
  *  10GBASE-KR PHYXS -- BASE-R PCS -- Fiber
  *
+ * With XAUI, observation shows:
+ *
+ *        XAUI PHYXS -- <appropriate PCS as above>
+ *
+ * and no switching of the host interface mode occurs.
+ *
  * If both the fiber and copper ports are connected, the first to gain
  * link takes priority and the other port is completely locked out.
  */
@@ -23,19 +29,17 @@ enum {
        MV_PCS_BASE_R           = 0x1000,
        MV_PCS_1000BASEX        = 0x2000,
 
+       MV_PCS_PAIRSWAP         = 0x8182,
+       MV_PCS_PAIRSWAP_MASK    = 0x0003,
+       MV_PCS_PAIRSWAP_AB      = 0x0002,
+       MV_PCS_PAIRSWAP_NONE    = 0x0003,
+
        /* These registers appear at 0x800X and 0xa00X - the 0xa00X control
         * registers appear to set themselves to the 0x800X when AN is
         * restarted, but status registers appear readable from either.
         */
        MV_AN_CTRL1000          = 0x8000, /* 1000base-T control register */
        MV_AN_STAT1000          = 0x8001, /* 1000base-T status register */
-
-       /* This register appears to reflect the copper status */
-       MV_AN_RESULT            = 0xa016,
-       MV_AN_RESULT_SPD_10     = BIT(12),
-       MV_AN_RESULT_SPD_100    = BIT(13),
-       MV_AN_RESULT_SPD_1000   = BIT(14),
-       MV_AN_RESULT_SPD_10000  = BIT(15),
 };
 
 static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg,
@@ -149,12 +153,18 @@ static int mv3310_config_init(struct phy_device *phydev)
                if (val & MDIO_PMA_EXTABLE_1000BKX)
                        __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
                                  supported);
-               if (val & MDIO_PMA_EXTABLE_100BTX)
+               if (val & MDIO_PMA_EXTABLE_100BTX) {
                        __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                                  supported);
-               if (val & MDIO_PMA_EXTABLE_10BT)
+                       __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+                                 supported);
+               }
+               if (val & MDIO_PMA_EXTABLE_10BT) {
                        __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
                                  supported);
+                       __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+                                 supported);
+               }
        }
 
        if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
@@ -174,6 +184,9 @@ static int mv3310_config_aneg(struct phy_device *phydev)
        u32 advertising;
        int ret;
 
+       /* We don't support manual MDI control */
+       phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
        if (phydev->autoneg == AUTONEG_DISABLE) {
                ret = genphy_c45_pma_setup_forced(phydev);
                if (ret < 0)
@@ -232,6 +245,24 @@ static int mv3310_aneg_done(struct phy_device *phydev)
        return genphy_c45_aneg_done(phydev);
 }
 
+static void mv3310_update_interface(struct phy_device *phydev)
+{
+       if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+            phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
+               /* The PHY automatically switches its serdes interface (and
+                * active PHYXS instance) between Cisco SGMII and 10GBase-KR
+                * modes according to the speed.  Florian suggests setting
+                * phydev->interface to communicate this to the MAC. Only do
+                * this if we are already in either SGMII or 10GBase-KR mode.
+                */
+               if (phydev->speed == SPEED_10000)
+                       phydev->interface = PHY_INTERFACE_MODE_10GKR;
+               else if (phydev->speed >= SPEED_10 &&
+                        phydev->speed < SPEED_10000)
+                       phydev->interface = PHY_INTERFACE_MODE_SGMII;
+       }
+}
+
 /* 10GBASE-ER,LR,LRM,SR do not support autonegotiation. */
 static int mv3310_read_10gbr_status(struct phy_device *phydev)
 {
@@ -239,8 +270,7 @@ static int mv3310_read_10gbr_status(struct phy_device *phydev)
        phydev->speed = SPEED_10000;
        phydev->duplex = DUPLEX_FULL;
 
-       if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
-               phydev->interface = PHY_INTERFACE_MODE_10GKR;
+       mv3310_update_interface(phydev);
 
        return 0;
 }
@@ -263,6 +293,7 @@ static int mv3310_read_status(struct phy_device *phydev)
        phydev->link = 0;
        phydev->pause = 0;
        phydev->asym_pause = 0;
+       phydev->mdix = 0;
 
        val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1);
        if (val < 0)
@@ -293,22 +324,8 @@ static int mv3310_read_status(struct phy_device *phydev)
 
                phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val);
 
-               if (phydev->autoneg == AUTONEG_ENABLE) {
-                       val = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_RESULT);
-                       if (val < 0)
-                               return val;
-
-                       if (val & MV_AN_RESULT_SPD_10000)
-                               phydev->speed = SPEED_10000;
-                       else if (val & MV_AN_RESULT_SPD_1000)
-                               phydev->speed = SPEED_1000;
-                       else if (val & MV_AN_RESULT_SPD_100)
-                               phydev->speed = SPEED_100;
-                       else if (val & MV_AN_RESULT_SPD_10)
-                               phydev->speed = SPEED_10;
-
-                       phydev->duplex = DUPLEX_FULL;
-               }
+               if (phydev->autoneg == AUTONEG_ENABLE)
+                       phy_resolve_aneg_linkmode(phydev);
        }
 
        if (phydev->autoneg != AUTONEG_ENABLE) {
@@ -317,21 +334,30 @@ static int mv3310_read_status(struct phy_device *phydev)
                        return val;
        }
 
-       if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
-            phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
-               /* The PHY automatically switches its serdes interface (and
-                * active PHYXS instance) between Cisco SGMII and 10GBase-KR
-                * modes according to the speed.  Florian suggests setting
-                * phydev->interface to communicate this to the MAC. Only do
-                * this if we are already in either SGMII or 10GBase-KR mode.
-                */
-               if (phydev->speed == SPEED_10000)
-                       phydev->interface = PHY_INTERFACE_MODE_10GKR;
-               else if (phydev->speed >= SPEED_10 &&
-                        phydev->speed < SPEED_10000)
-                       phydev->interface = PHY_INTERFACE_MODE_SGMII;
+       if (phydev->speed == SPEED_10000) {
+               val = genphy_c45_read_mdix(phydev);
+               if (val < 0)
+                       return val;
+       } else {
+               val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PAIRSWAP);
+               if (val < 0)
+                       return val;
+
+               switch (val & MV_PCS_PAIRSWAP_MASK) {
+               case MV_PCS_PAIRSWAP_AB:
+                       phydev->mdix = ETH_TP_MDI_X;
+                       break;
+               case MV_PCS_PAIRSWAP_NONE:
+                       phydev->mdix = ETH_TP_MDI;
+                       break;
+               default:
+                       phydev->mdix = ETH_TP_MDI_INVALID;
+                       break;
+               }
        }
 
+       mv3310_update_interface(phydev);
+
        return 0;
 }
 
@@ -341,7 +367,9 @@ static struct phy_driver mv3310_drivers[] = {
                .phy_id_mask    = MARVELL_PHY_ID_MASK,
                .name           = "mv88x3310",
                .features       = SUPPORTED_10baseT_Full |
+                                 SUPPORTED_10baseT_Half |
                                  SUPPORTED_100baseT_Full |
+                                 SUPPORTED_100baseT_Half |
                                  SUPPORTED_1000baseT_Full |
                                  SUPPORTED_Autoneg |
                                  SUPPORTED_TP |
index bfd3090..07c6048 100644 (file)
@@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
        }
 
        ret = xgene_enet_ecc_init(pdata);
-       if (ret)
+       if (ret) {
+               if (pdata->dev->of_node)
+                       clk_disable_unprepare(pdata->clk);
                return ret;
+       }
        xgene_gmac_reset(pdata);
 
        return 0;
@@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)
                return ret;
 
        mdio_bus = mdiobus_alloc();
-       if (!mdio_bus)
-               return -ENOMEM;
+       if (!mdio_bus) {
+               ret = -ENOMEM;
+               goto out_clk;
+       }
 
        mdio_bus->name = "APM X-Gene MDIO bus";
 
@@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
                mdio_bus->phy_mask = ~0;
                ret = mdiobus_register(mdio_bus);
                if (ret)
-                       goto out;
+                       goto out_mdiobus;
 
                acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
                                    acpi_register_phy, NULL, mdio_bus, NULL);
@@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)
        }
 
        if (ret)
-               goto out;
+               goto out_mdiobus;
 
        pdata->mdio_bus = mdio_bus;
        xgene_mdio_status = true;
 
        return 0;
 
-out:
+out_mdiobus:
        mdiobus_free(mdio_bus);
 
+out_clk:
+       if (dev->of_node)
+               clk_disable_unprepare(pdata->clk);
+
        return ret;
 }
 
index a0f34c3..88272b3 100644 (file)
 
 #include "mdio-boardinfo.h"
 
-int mdiobus_register_device(struct mdio_device *mdiodev)
+static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
 {
        struct gpio_desc *gpiod = NULL;
 
-       if (mdiodev->bus->mdio_map[mdiodev->addr])
-               return -EBUSY;
-
        /* Deassert the optional reset signal */
        if (mdiodev->dev.of_node)
                gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode,
@@ -69,6 +66,22 @@ int mdiobus_register_device(struct mdio_device *mdiodev)
        /* Assert the reset signal again */
        mdio_device_reset(mdiodev, 1);
 
+       return 0;
+}
+
+int mdiobus_register_device(struct mdio_device *mdiodev)
+{
+       int err;
+
+       if (mdiodev->bus->mdio_map[mdiodev->addr])
+               return -EBUSY;
+
+       if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) {
+               err = mdiobus_register_gpiod(mdiodev);
+               if (err)
+                       return err;
+       }
+
        mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev;
 
        return 0;
@@ -515,6 +528,55 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
 EXPORT_SYMBOL(mdiobus_scan);
 
 /**
+ * __mdiobus_read - Unlocked version of the mdiobus_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to read
+ *
+ * Read a MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
+{
+       int retval;
+
+       WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+
+       retval = bus->read(bus, addr, regnum);
+
+       trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+
+       return retval;
+}
+EXPORT_SYMBOL(__mdiobus_read);
+
+/**
+ * __mdiobus_write - Unlocked version of the mdiobus_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * Write a MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
+{
+       int err;
+
+       WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+
+       err = bus->write(bus, addr, regnum, val);
+
+       trace_mdio_access(bus, 0, addr, regnum, val, err);
+
+       return err;
+}
+EXPORT_SYMBOL(__mdiobus_write);
+
+/**
  * mdiobus_read_nested - Nested version of the mdiobus_read function
  * @bus: the mii_bus struct
  * @addr: the phy address
@@ -534,11 +596,9 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
        BUG_ON(in_interrupt());
 
        mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-       retval = bus->read(bus, addr, regnum);
+       retval = __mdiobus_read(bus, addr, regnum);
        mutex_unlock(&bus->mdio_lock);
 
-       trace_mdio_access(bus, 1, addr, regnum, retval, retval);
-
        return retval;
 }
 EXPORT_SYMBOL(mdiobus_read_nested);
@@ -560,11 +620,9 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
        BUG_ON(in_interrupt());
 
        mutex_lock(&bus->mdio_lock);
-       retval = bus->read(bus, addr, regnum);
+       retval = __mdiobus_read(bus, addr, regnum);
        mutex_unlock(&bus->mdio_lock);
 
-       trace_mdio_access(bus, 1, addr, regnum, retval, retval);
-
        return retval;
 }
 EXPORT_SYMBOL(mdiobus_read);
@@ -590,11 +648,9 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
        BUG_ON(in_interrupt());
 
        mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-       err = bus->write(bus, addr, regnum, val);
+       err = __mdiobus_write(bus, addr, regnum, val);
        mutex_unlock(&bus->mdio_lock);
 
-       trace_mdio_access(bus, 0, addr, regnum, val, err);
-
        return err;
 }
 EXPORT_SYMBOL(mdiobus_write_nested);
@@ -617,11 +673,9 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
        BUG_ON(in_interrupt());
 
        mutex_lock(&bus->mdio_lock);
-       err = bus->write(bus, addr, regnum, val);
+       err = __mdiobus_write(bus, addr, regnum, val);
        mutex_unlock(&bus->mdio_lock);
 
-       trace_mdio_access(bus, 0, addr, regnum, val, err);
-
        return err;
 }
 EXPORT_SYMBOL(mdiobus_write);
index 843c1dd..c924700 100644 (file)
@@ -126,7 +126,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
 
        gpiod_set_value(mdiodev->reset, value);
 
-       d = value ? mdiodev->reset_delay : mdiodev->reset_post_delay;
+       d = value ? mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay;
        if (d)
                usleep_range(d, d + max_t(unsigned int, d / 10, 100));
 }
index fd500b1..0f45310 100644 (file)
@@ -624,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
                phydev->link = 0;
                if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
                        phydev->drv->config_intr(phydev);
+               return genphy_config_aneg(phydev);
        }
 
        return 0;
index dada819..a457685 100644 (file)
@@ -233,6 +233,39 @@ int genphy_c45_read_pma(struct phy_device *phydev)
 }
 EXPORT_SYMBOL_GPL(genphy_c45_read_pma);
 
+/**
+ * genphy_c45_read_mdix - read mdix status from PMA
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_read_mdix(struct phy_device *phydev)
+{
+       int val;
+
+       if (phydev->speed == SPEED_10000) {
+               val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+                                  MDIO_PMA_10GBT_SWAPPOL);
+               if (val < 0)
+                       return val;
+
+               switch (val) {
+               case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
+                       phydev->mdix = ETH_TP_MDI;
+                       break;
+
+               case 0:
+                       phydev->mdix = ETH_TP_MDI_X;
+                       break;
+
+               default:
+                       phydev->mdix = ETH_TP_MDI_INVALID;
+                       break;
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
+
 /* The gen10g_* functions are the old Clause 45 stub */
 
 static int gen10g_config_aneg(struct phy_device *phydev)
index 21f75ae..44d09b1 100644 (file)
@@ -189,17 +189,61 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
        return count;
 }
 
+/**
+ * phy_resolve_aneg_linkmode - resolve the advertisments into phy settings
+ * @phydev: The phy_device struct
+ *
+ * Resolve our and the link partner advertisments into their corresponding
+ * speed and duplex. If full duplex was negotiated, extract the pause mode
+ * from the link partner mask.
+ */
+void phy_resolve_aneg_linkmode(struct phy_device *phydev)
+{
+       u32 common = phydev->lp_advertising & phydev->advertising;
+
+       if (common & ADVERTISED_10000baseT_Full) {
+               phydev->speed = SPEED_10000;
+               phydev->duplex = DUPLEX_FULL;
+       } else if (common & ADVERTISED_1000baseT_Full) {
+               phydev->speed = SPEED_1000;
+               phydev->duplex = DUPLEX_FULL;
+       } else if (common & ADVERTISED_1000baseT_Half) {
+               phydev->speed = SPEED_1000;
+               phydev->duplex = DUPLEX_HALF;
+       } else if (common & ADVERTISED_100baseT_Full) {
+               phydev->speed = SPEED_100;
+               phydev->duplex = DUPLEX_FULL;
+       } else if (common & ADVERTISED_100baseT_Half) {
+               phydev->speed = SPEED_100;
+               phydev->duplex = DUPLEX_HALF;
+       } else if (common & ADVERTISED_10baseT_Full) {
+               phydev->speed = SPEED_10;
+               phydev->duplex = DUPLEX_FULL;
+       } else if (common & ADVERTISED_10baseT_Half) {
+               phydev->speed = SPEED_10;
+               phydev->duplex = DUPLEX_HALF;
+       }
+
+       if (phydev->duplex == DUPLEX_FULL) {
+               phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause);
+               phydev->asym_pause = !!(phydev->lp_advertising &
+                                       ADVERTISED_Asym_Pause);
+       }
+}
+EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
+
 static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
                             u16 regnum)
 {
        /* Write the desired MMD Devad */
-       bus->write(bus, phy_addr, MII_MMD_CTRL, devad);
+       __mdiobus_write(bus, phy_addr, MII_MMD_CTRL, devad);
 
        /* Write the desired MMD register address */
-       bus->write(bus, phy_addr, MII_MMD_DATA, regnum);
+       __mdiobus_write(bus, phy_addr, MII_MMD_DATA, regnum);
 
        /* Select the Function : DATA with no post increment */
-       bus->write(bus, phy_addr, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
+       __mdiobus_write(bus, phy_addr, MII_MMD_CTRL,
+                       devad | MII_MMD_CTRL_NOINCR);
 }
 
 /**
@@ -232,7 +276,7 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
                mmd_phy_indirect(bus, phy_addr, devad, regnum);
 
                /* Read the content of the MMD's selected register */
-               val = bus->read(bus, phy_addr, MII_MMD_DATA);
+               val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
                mutex_unlock(&bus->mdio_lock);
        }
        return val;
@@ -271,7 +315,7 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
                mmd_phy_indirect(bus, phy_addr, devad, regnum);
 
                /* Write the data into MMD's selected register */
-               bus->write(bus, phy_addr, MII_MMD_DATA, val);
+               __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
                mutex_unlock(&bus->mdio_lock);
 
                ret = 0;
@@ -279,3 +323,208 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
        return ret;
 }
 EXPORT_SYMBOL(phy_write_mmd);
+
+/**
+ * __phy_modify() - Convenience function for modifying a PHY register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Unlocked helper function which allows a PHY register to be modified as
+ * new register value = (old register value & mask) | set
+ */
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+       int ret, res;
+
+       ret = __phy_read(phydev, regnum);
+       if (ret >= 0) {
+               res = __phy_write(phydev, regnum, (ret & ~mask) | set);
+               if (res < 0)
+                       ret = res;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(__phy_modify);
+
+/**
+ * phy_modify - Convenience function for modifying a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+       int ret;
+
+       mutex_lock(&phydev->mdio.bus->mdio_lock);
+       ret = __phy_modify(phydev, regnum, mask, set);
+       mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(phy_modify);
+
+static int __phy_read_page(struct phy_device *phydev)
+{
+       return phydev->drv->read_page(phydev);
+}
+
+static int __phy_write_page(struct phy_device *phydev, int page)
+{
+       return phydev->drv->write_page(phydev, page);
+}
+
+/**
+ * phy_save_page() - take the bus lock and save the current page
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Take the MDIO bus lock, and return the current page number. On error,
+ * returns a negative errno. phy_restore_page() must always be called
+ * after this, irrespective of success or failure of this call.
+ */
+int phy_save_page(struct phy_device *phydev)
+{
+       mutex_lock(&phydev->mdio.bus->mdio_lock);
+       return __phy_read_page(phydev);
+}
+EXPORT_SYMBOL_GPL(phy_save_page);
+
+/**
+ * phy_select_page() - take the bus lock, save the current page, and set a page
+ * @phydev: a pointer to a &struct phy_device
+ * @page: desired page
+ *
+ * Take the MDIO bus lock to protect against concurrent access, save the
+ * current PHY page, and set the current page.  On error, returns a
+ * negative errno, otherwise returns the previous page number.
+ * phy_restore_page() must always be called after this, irrespective
+ * of success or failure of this call.
+ */
+int phy_select_page(struct phy_device *phydev, int page)
+{
+       int ret, oldpage;
+
+       oldpage = ret = phy_save_page(phydev);
+       if (ret < 0)
+               return ret;
+
+       if (oldpage != page) {
+               ret = __phy_write_page(phydev, page);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return oldpage;
+}
+EXPORT_SYMBOL_GPL(phy_select_page);
+
+/**
+ * phy_restore_page() - restore the page register and release the bus lock
+ * @phydev: a pointer to a &struct phy_device
+ * @oldpage: the old page, return value from phy_save_page() or phy_select_page()
+ * @ret: operation's return code
+ *
+ * Release the MDIO bus lock, restoring @oldpage if it is a valid page.
+ * This function propagates the earliest error code from the group of
+ * operations.
+ *
+ * Returns:
+ *   @oldpage if it was a negative value, otherwise
+ *   @ret if it was a negative errno value, otherwise
+ *   phy_write_page()'s negative value if it were in error, otherwise
+ *   @ret.
+ */
+int phy_restore_page(struct phy_device *phydev, int oldpage, int ret)
+{
+       int r;
+
+       if (oldpage >= 0) {
+               r = __phy_write_page(phydev, oldpage);
+
+               /* Propagate the operation return code if the page write
+                * was successful.
+                */
+               if (ret >= 0 && r < 0)
+                       ret = r;
+       } else {
+               /* Propagate the phy page selection error code */
+               ret = oldpage;
+       }
+
+       mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(phy_restore_page);
+
+/**
+ * phy_read_paged() - Convenience function for reading a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ *
+ * Same rules as for phy_read().
+ */
+int phy_read_paged(struct phy_device *phydev, int page, u32 regnum)
+{
+       int ret = 0, oldpage;
+
+       oldpage = phy_select_page(phydev, page);
+       if (oldpage >= 0)
+               ret = __phy_read(phydev, regnum);
+
+       return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_read_paged);
+
+/**
+ * phy_write_paged() - Convenience function for writing a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @val: value to write
+ *
+ * Same rules as for phy_write().
+ */
+int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val)
+{
+       int ret = 0, oldpage;
+
+       oldpage = phy_select_page(phydev, page);
+       if (oldpage >= 0)
+               ret = __phy_write(phydev, regnum, val);
+
+       return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_write_paged);
+
+/**
+ * phy_modify_paged() - Convenience function for modifying a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Same rules as for phy_read() and phy_write().
+ */
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+                    u16 mask, u16 set)
+{
+       int ret = 0, oldpage;
+
+       oldpage = phy_select_page(phydev, page);
+       if (oldpage >= 0)
+               ret = __phy_modify(phydev, regnum, mask, set);
+
+       return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_modify_paged);
index be13b5d..2c5b2e0 100644 (file)
@@ -1368,9 +1368,8 @@ static int genphy_config_eee_advert(struct phy_device *phydev)
  */
 int genphy_setup_forced(struct phy_device *phydev)
 {
-       int ctl = phy_read(phydev, MII_BMCR);
+       u16 ctl = 0;
 
-       ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN;
        phydev->pause = 0;
        phydev->asym_pause = 0;
 
@@ -1382,7 +1381,8 @@ int genphy_setup_forced(struct phy_device *phydev)
        if (DUPLEX_FULL == phydev->duplex)
                ctl |= BMCR_FULLDPLX;
 
-       return phy_write(phydev, MII_BMCR, ctl);
+       return phy_modify(phydev, MII_BMCR,
+                         BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl);
 }
 EXPORT_SYMBOL(genphy_setup_forced);
 
@@ -1392,17 +1392,9 @@ EXPORT_SYMBOL(genphy_setup_forced);
  */
 int genphy_restart_aneg(struct phy_device *phydev)
 {
-       int ctl = phy_read(phydev, MII_BMCR);
-
-       if (ctl < 0)
-               return ctl;
-
-       ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
-
        /* Don't isolate the PHY if we're negotiating */
-       ctl &= ~BMCR_ISOLATE;
-
-       return phy_write(phydev, MII_BMCR, ctl);
+       return phy_modify(phydev, MII_BMCR, ~BMCR_ISOLATE,
+                         BMCR_ANENABLE | BMCR_ANRESTART);
 }
 EXPORT_SYMBOL(genphy_restart_aneg);
 
@@ -1668,44 +1660,20 @@ EXPORT_SYMBOL(genphy_config_init);
 
 int genphy_suspend(struct phy_device *phydev)
 {
-       int value;
-
-       mutex_lock(&phydev->lock);
-
-       value = phy_read(phydev, MII_BMCR);
-       phy_write(phydev, MII_BMCR, value | BMCR_PDOWN);
-
-       mutex_unlock(&phydev->lock);
-
-       return 0;
+       return phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN);
 }
 EXPORT_SYMBOL(genphy_suspend);
 
 int genphy_resume(struct phy_device *phydev)
 {
-       int value;
-
-       value = phy_read(phydev, MII_BMCR);
-       phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
-
-       return 0;
+       return phy_modify(phydev, MII_BMCR, ~BMCR_PDOWN, 0);
 }
 EXPORT_SYMBOL(genphy_resume);
 
 int genphy_loopback(struct phy_device *phydev, bool enable)
 {
-       int value;
-
-       value = phy_read(phydev, MII_BMCR);
-       if (value < 0)
-               return value;
-
-       if (enable)
-               value |= BMCR_LOOPBACK;
-       else
-               value &= ~BMCR_LOOPBACK;
-
-       return phy_write(phydev, MII_BMCR, value);
+       return phy_modify(phydev, MII_BMCR, ~BMCR_LOOPBACK,
+                         enable ? BMCR_LOOPBACK : 0);
 }
 EXPORT_SYMBOL(genphy_loopback);
 
index f7a7774..d1f9466 100644 (file)
@@ -567,6 +567,7 @@ struct phylink *phylink_create(struct net_device *ndev,
        pl->link_config.pause = MLO_PAUSE_AN;
        pl->link_config.speed = SPEED_UNKNOWN;
        pl->link_config.duplex = DUPLEX_UNKNOWN;
+       pl->link_config.an_enabled = true;
        pl->ops = ops;
        __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
 
@@ -725,6 +726,9 @@ int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
                     phy_interface_mode_is_8023z(pl->link_interface))))
                return -EINVAL;
 
+       if (pl->phydev)
+               return -EBUSY;
+
        /* Use PHY device/driver interface */
        if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
                pl->link_interface = phy->interface;
@@ -1136,6 +1140,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
        mutex_lock(&pl->state_mutex);
        /* Configure the MAC to match the new settings */
        linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
+       pl->link_config.interface = config.interface;
        pl->link_config.speed = our_kset.base.speed;
        pl->link_config.duplex = our_kset.base.duplex;
        pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
@@ -1573,7 +1578,7 @@ static int phylink_sfp_module_insert(void *upstream,
        __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
        struct phylink_link_state config;
        phy_interface_t iface;
-       int mode, ret = 0;
+       int ret = 0;
        bool changed;
        u8 port;
 
@@ -1588,7 +1593,6 @@ static int phylink_sfp_module_insert(void *upstream,
        case PHY_INTERFACE_MODE_1000BASEX:
        case PHY_INTERFACE_MODE_2500BASEX:
        case PHY_INTERFACE_MODE_10GKR:
-               mode = MLO_AN_INBAND;
                break;
        default:
                return -EINVAL;
@@ -1606,13 +1610,15 @@ static int phylink_sfp_module_insert(void *upstream,
        ret = phylink_validate(pl, support, &config);
        if (ret) {
                netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
-                          phylink_an_mode_str(mode), phy_modes(config.interface),
+                          phylink_an_mode_str(MLO_AN_INBAND),
+                          phy_modes(config.interface),
                           __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
                return ret;
        }
 
        netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n",
-                  phylink_an_mode_str(mode), phy_modes(config.interface),
+                  phylink_an_mode_str(MLO_AN_INBAND),
+                  phy_modes(config.interface),
                   __ETHTOOL_LINK_MODE_MASK_NBITS, support);
 
        if (phy_interface_mode_is_8023z(iface) && pl->phydev)
@@ -1625,15 +1631,15 @@ static int phylink_sfp_module_insert(void *upstream,
                linkmode_copy(pl->link_config.advertising, config.advertising);
        }
 
-       if (pl->link_an_mode != mode ||
+       if (pl->link_an_mode != MLO_AN_INBAND ||
            pl->link_config.interface != config.interface) {
                pl->link_config.interface = config.interface;
-               pl->link_an_mode = mode;
+               pl->link_an_mode = MLO_AN_INBAND;
 
                changed = true;
 
                netdev_info(pl->netdev, "switched to %s/%s link mode\n",
-                           phylink_an_mode_str(mode),
+                           phylink_an_mode_str(MLO_AN_INBAND),
                            phy_modes(config.interface));
        }
 
index 1356dba..3ecc378 100644 (file)
@@ -57,21 +57,19 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
        case SFP_CONNECTOR_MT_RJ:
        case SFP_CONNECTOR_MU:
        case SFP_CONNECTOR_OPTICAL_PIGTAIL:
-               if (support)
-                       phylink_set(support, FIBRE);
                port = PORT_FIBRE;
                break;
 
        case SFP_CONNECTOR_RJ45:
-               if (support)
-                       phylink_set(support, TP);
                port = PORT_TP;
                break;
 
+       case SFP_CONNECTOR_COPPER_PIGTAIL:
+               port = PORT_DA;
+               break;
+
        case SFP_CONNECTOR_UNSPEC:
                if (id->base.e1000_base_t) {
-                       if (support)
-                               phylink_set(support, TP);
                        port = PORT_TP;
                        break;
                }
@@ -80,7 +78,6 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
        case SFP_CONNECTOR_MPO_1X12:
        case SFP_CONNECTOR_MPO_2X16:
        case SFP_CONNECTOR_HSSDC_II:
-       case SFP_CONNECTOR_COPPER_PIGTAIL:
        case SFP_CONNECTOR_NOSEPARATE:
        case SFP_CONNECTOR_MXC_2X16:
                port = PORT_OTHER;
@@ -92,6 +89,18 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
                break;
        }
 
+       if (support) {
+               switch (port) {
+               case PORT_FIBRE:
+                       phylink_set(support, FIBRE);
+                       break;
+
+               case PORT_TP:
+                       phylink_set(support, TP);
+                       break;
+               }
+       }
+
        return port;
 }
 EXPORT_SYMBOL_GPL(sfp_parse_port);
@@ -143,6 +152,11 @@ phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
                break;
 
        default:
+               if (id->base.e1000_base_cx) {
+                       iface = PHY_INTERFACE_MODE_1000BASEX;
+                       break;
+               }
+
                iface = PHY_INTERFACE_MODE_NA;
                dev_err(bus->sfp_dev,
                        "SFP module encoding does not support 8b10b nor 64b66b\n");
@@ -165,10 +179,26 @@ EXPORT_SYMBOL_GPL(sfp_parse_interface);
 void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
                       unsigned long *support)
 {
+       unsigned int br_min, br_nom, br_max;
+
        phylink_set(support, Autoneg);
        phylink_set(support, Pause);
        phylink_set(support, Asym_Pause);
 
+       /* Decode the bitrate information to MBd */
+       br_min = br_nom = br_max = 0;
+       if (id->base.br_nominal) {
+               if (id->base.br_nominal != 255) {
+                       br_nom = id->base.br_nominal * 100;
+                       br_min = br_nom + id->base.br_nominal * id->ext.br_min;
+                       br_max = br_nom + id->base.br_nominal * id->ext.br_max;
+               } else if (id->ext.br_max) {
+                       br_nom = 250 * id->ext.br_max;
+                       br_max = br_nom + br_nom * id->ext.br_min / 100;
+                       br_min = br_nom - br_nom * id->ext.br_min / 100;
+               }
+       }
+
        /* Set ethtool support from the compliance fields. */
        if (id->base.e10g_base_sr)
                phylink_set(support, 10000baseSR_Full);
@@ -187,6 +217,34 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
                phylink_set(support, 1000baseT_Full);
        }
 
+       /* 1000Base-PX or 1000Base-BX10 */
+       if ((id->base.e_base_px || id->base.e_base_bx10) &&
+           br_min <= 1300 && br_max >= 1200)
+               phylink_set(support, 1000baseX_Full);
+
+       /* For active or passive cables, select the link modes
+        * based on the bit rates and the cable compliance bytes.
+        */
+       if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) {
+               /* This may look odd, but some manufacturers use 12000MBd */
+               if (br_min <= 12000 && br_max >= 10300)
+                       phylink_set(support, 10000baseCR_Full);
+               if (br_min <= 3200 && br_max >= 3100)
+                       phylink_set(support, 2500baseX_Full);
+               if (br_min <= 1300 && br_max >= 1200)
+                       phylink_set(support, 1000baseX_Full);
+       }
+       if (id->base.sfp_ct_passive) {
+               if (id->base.passive.sff8431_app_e)
+                       phylink_set(support, 10000baseCR_Full);
+       }
+       if (id->base.sfp_ct_active) {
+               if (id->base.active.sff8431_app_e ||
+                   id->base.active.sff8431_lim) {
+                       phylink_set(support, 10000baseCR_Full);
+               }
+       }
+
        switch (id->base.extended_cc) {
        case 0x00: /* Unspecified */
                break;
@@ -220,35 +278,6 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
                if (id->base.br_nominal >= 12)
                        phylink_set(support, 1000baseX_Full);
        }
-
-       switch (id->base.connector) {
-       case SFP_CONNECTOR_SC:
-       case SFP_CONNECTOR_FIBERJACK:
-       case SFP_CONNECTOR_LC:
-       case SFP_CONNECTOR_MT_RJ:
-       case SFP_CONNECTOR_MU:
-       case SFP_CONNECTOR_OPTICAL_PIGTAIL:
-               break;
-
-       case SFP_CONNECTOR_UNSPEC:
-               if (id->base.e1000_base_t)
-                       break;
-
-       case SFP_CONNECTOR_SG: /* guess */
-       case SFP_CONNECTOR_MPO_1X12:
-       case SFP_CONNECTOR_MPO_2X16:
-       case SFP_CONNECTOR_HSSDC_II:
-       case SFP_CONNECTOR_COPPER_PIGTAIL:
-       case SFP_CONNECTOR_NOSEPARATE:
-       case SFP_CONNECTOR_MXC_2X16:
-       default:
-               /* a guess at the supported link modes */
-               dev_warn(bus->sfp_dev,
-                        "Guessing link modes, please report...\n");
-               phylink_set(support, 1000baseT_Half);
-               phylink_set(support, 1000baseT_Full);
-               break;
-       }
 }
 EXPORT_SYMBOL_GPL(sfp_parse_support);
 
index ee6b2e0..6c7d928 100644 (file)
@@ -466,11 +466,6 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
 {
        /* SFP module inserted - read I2C data */
        struct sfp_eeprom_id id;
-       char vendor[17];
-       char part[17];
-       char sn[17];
-       char date[9];
-       char rev[5];
        u8 check;
        int err;
 
@@ -506,19 +501,12 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
 
        sfp->id = id;
 
-       memcpy(vendor, sfp->id.base.vendor_name, 16);
-       vendor[16] = '\0';
-       memcpy(part, sfp->id.base.vendor_pn, 16);
-       part[16] = '\0';
-       memcpy(rev, sfp->id.base.vendor_rev, 4);
-       rev[4] = '\0';
-       memcpy(sn, sfp->id.ext.vendor_sn, 16);
-       sn[16] = '\0';
-       memcpy(date, sfp->id.ext.datecode, 8);
-       date[8] = '\0';
-
-       dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n",
-                vendor, part, rev, sn, date);
+       dev_info(sfp->dev, "module %.*s %.*s rev %.*s sn %.*s dc %.*s\n",
+                (int)sizeof(id.base.vendor_name), id.base.vendor_name,
+                (int)sizeof(id.base.vendor_pn), id.base.vendor_pn,
+                (int)sizeof(id.base.vendor_rev), id.base.vendor_rev,
+                (int)sizeof(id.ext.vendor_sn), id.ext.vendor_sn,
+                (int)sizeof(id.ext.datecode), id.ext.datecode);
 
        /* Check whether we support this module */
        if (!sfp->type->module_supported(&sfp->id)) {
index e367d63..e7c5f4b 100644 (file)
@@ -180,6 +180,7 @@ struct tun_file {
        struct list_head next;
        struct tun_struct *detached;
        struct skb_array tx_array;
+       struct xdp_rxq_info xdp_rxq;
 };
 
 struct tun_flow_entry {
@@ -687,8 +688,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
                            tun->dev->reg_state == NETREG_REGISTERED)
                                unregister_netdevice(tun->dev);
                }
-               if (tun)
+               if (tun) {
                        skb_array_cleanup(&tfile->tx_array);
+                       xdp_rxq_info_unreg(&tfile->xdp_rxq);
+               }
                sock_put(&tfile->sk);
        }
 }
@@ -728,11 +731,13 @@ static void tun_detach_all(struct net_device *dev)
                tun_napi_del(tun, tfile);
                /* Drop read queue */
                tun_queue_purge(tfile);
+               xdp_rxq_info_unreg(&tfile->xdp_rxq);
                sock_put(&tfile->sk);
        }
        list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
                tun_enable_queue(tfile);
                tun_queue_purge(tfile);
+               xdp_rxq_info_unreg(&tfile->xdp_rxq);
                sock_put(&tfile->sk);
        }
        BUG_ON(tun->numdisabled != 0);
@@ -784,6 +789,22 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
 
        tfile->queue_index = tun->numqueues;
        tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
+
+       if (tfile->detached) {
+               /* Re-attach detached tfile, updating XDP queue_index */
+               WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
+
+               if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
+                       tfile->xdp_rxq.queue_index = tfile->queue_index;
+       } else {
+               /* Setup XDP RX-queue info, for new tfile getting attached */
+               err = xdp_rxq_info_reg(&tfile->xdp_rxq,
+                                      tun->dev, tfile->queue_index);
+               if (err < 0)
+                       goto out;
+               err = 0;
+       }
+
        rcu_assign_pointer(tfile->tun, tun);
        rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
        tun->numqueues++;
@@ -1508,6 +1529,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
                xdp.data = buf + pad;
                xdp_set_data_meta_invalid(&xdp);
                xdp.data_end = xdp.data + len;
+               xdp.rxq = &tfile->xdp_rxq;
                orig_data = xdp.data;
                act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
index 6fb7b65..ed82993 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/average.h>
 #include <linux/filter.h>
 #include <net/route.h>
+#include <net/xdp.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -115,6 +116,8 @@ struct receive_queue {
 
        /* Name of this receive queue: input.$index */
        char name[40];
+
+       struct xdp_rxq_info xdp_rxq;
 };
 
 struct virtnet_info {
@@ -559,6 +562,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                xdp.data = xdp.data_hard_start + xdp_headroom;
                xdp_set_data_meta_invalid(&xdp);
                xdp.data_end = xdp.data + len;
+               xdp.rxq = &rq->xdp_rxq;
                orig_data = xdp.data;
                act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
@@ -692,6 +696,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                xdp.data = data + vi->hdr_len;
                xdp_set_data_meta_invalid(&xdp);
                xdp.data_end = xdp.data + (len - vi->hdr_len);
+               xdp.rxq = &rq->xdp_rxq;
+
                act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
                if (act != XDP_PASS)
@@ -1225,13 +1231,18 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 static int virtnet_open(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
-       int i;
+       int i, err;
 
        for (i = 0; i < vi->max_queue_pairs; i++) {
                if (i < vi->curr_queue_pairs)
                        /* Make sure we have some buffers: if oom use wq. */
                        if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
                                schedule_delayed_work(&vi->refill, 0);
+
+               err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
+               if (err < 0)
+                       return err;
+
                virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
                virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
        }
@@ -1560,6 +1571,7 @@ static int virtnet_close(struct net_device *dev)
        cancel_delayed_work_sync(&vi->refill);
 
        for (i = 0; i < vi->max_queue_pairs; i++) {
+               xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
                napi_disable(&vi->rq[i].napi);
                virtnet_napi_tx_disable(&vi->sq[i].napi);
        }
index 48a0dc2..82090ae 100644 (file)
@@ -2155,6 +2155,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                ndst = &rt->dst;
+               if (skb_dst(skb)) {
+                       int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
+
+                       skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
+                                                      skb, mtu);
+               }
+
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
                err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2190,6 +2197,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                                goto out_unlock;
                }
 
+               if (skb_dst(skb)) {
+                       int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
+
+                       skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
+                                                      skb, mtu);
+               }
+
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
                skb_scrub_packet(skb, xnet);
@@ -3103,6 +3117,11 @@ static void vxlan_config_apply(struct net_device *dev,
 
                max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
                                           VXLAN_HEADROOM);
+               if (max_mtu < ETH_MIN_MTU)
+                       max_mtu = ETH_MIN_MTU;
+
+               if (!changelink && !conf->mtu)
+                       dev->mtu = max_mtu;
        }
 
        if (dev->mtu > max_mtu)
index 8d9a59b..6739ac2 100644 (file)
@@ -21,6 +21,7 @@ ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
 ath10k_core-$(CONFIG_THERMAL) += thermal.o
 ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
 ath10k_core-$(CONFIG_PM) += wow.o
+ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
 
 obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
 ath10k_pci-y += pci.o \
index ff6815e..35d1049 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved.
  * Copyright (c) 2015 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
index 2d3a2f3..af4978d 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 9c0839b..9a39681 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index a8afd69..b9def7b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -327,12 +327,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
  * Guts of ath10k_ce_send.
  * The caller takes responsibility for any needed locking.
  */
-int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
-                         void *per_transfer_context,
-                         u32 buffer,
-                         unsigned int nbytes,
-                         unsigned int transfer_id,
-                         unsigned int flags)
+static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+                                 void *per_transfer_context,
+                                 dma_addr_t buffer,
+                                 unsigned int nbytes,
+                                 unsigned int transfer_id,
+                                 unsigned int flags)
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@@ -384,6 +384,87 @@ exit:
        return ret;
 }
 
+static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
+                                    void *per_transfer_context,
+                                    dma_addr_t buffer,
+                                    unsigned int nbytes,
+                                    unsigned int transfer_id,
+                                    unsigned int flags)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+       struct ce_desc_64 *desc, sdesc;
+       unsigned int nentries_mask = src_ring->nentries_mask;
+       unsigned int sw_index = src_ring->sw_index;
+       unsigned int write_index = src_ring->write_index;
+       u32 ctrl_addr = ce_state->ctrl_addr;
+       __le32 *addr;
+       u32 desc_flags = 0;
+       int ret = 0;
+
+       if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+               return -ESHUTDOWN;
+
+       if (nbytes > ce_state->src_sz_max)
+               ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
+                           __func__, nbytes, ce_state->src_sz_max);
+
+       if (unlikely(CE_RING_DELTA(nentries_mask,
+                                  write_index, sw_index - 1) <= 0)) {
+               ret = -ENOSR;
+               goto exit;
+       }
+
+       desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
+                                     write_index);
+
+       desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+       if (flags & CE_SEND_FLAG_GATHER)
+               desc_flags |= CE_DESC_FLAGS_GATHER;
+
+       if (flags & CE_SEND_FLAG_BYTE_SWAP)
+               desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+       addr = (__le32 *)&sdesc.addr;
+
+       flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
+       addr[0] = __cpu_to_le32(buffer);
+       addr[1] = __cpu_to_le32(flags);
+       if (flags & CE_SEND_FLAG_GATHER)
+               addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
+       else
+               addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
+
+       sdesc.nbytes = __cpu_to_le16(nbytes);
+       sdesc.flags  = __cpu_to_le16(desc_flags);
+
+       *desc = sdesc;
+
+       src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+       /* Update Source Ring Write Index */
+       write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+       if (!(flags & CE_SEND_FLAG_GATHER))
+               ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+       src_ring->write_index = write_index;
+exit:
+       return ret;
+}
+
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+                         void *per_transfer_context,
+                         dma_addr_t buffer,
+                         unsigned int nbytes,
+                         unsigned int transfer_id,
+                         unsigned int flags)
+{
+       return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
+                                   buffer, nbytes, transfer_id, flags);
+}
+
 void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
 {
        struct ath10k *ar = pipe->ar;
@@ -413,7 +494,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
 
 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
                   void *per_transfer_context,
-                  u32 buffer,
+                  dma_addr_t buffer,
                   unsigned int nbytes,
                   unsigned int transfer_id,
                   unsigned int flags)
@@ -459,7 +540,8 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
        return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
 }
 
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+                                  dma_addr_t paddr)
 {
        struct ath10k *ar = pipe->ar;
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
@@ -488,6 +570,39 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
        return 0;
 }
 
+static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
+                                     void *ctx,
+                                     dma_addr_t paddr)
+{
+       struct ath10k *ar = pipe->ar;
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+       struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+       unsigned int nentries_mask = dest_ring->nentries_mask;
+       unsigned int write_index = dest_ring->write_index;
+       unsigned int sw_index = dest_ring->sw_index;
+       struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+       struct ce_desc_64 *desc =
+                       CE_DEST_RING_TO_DESC_64(base, write_index);
+       u32 ctrl_addr = pipe->ctrl_addr;
+
+       lockdep_assert_held(&ce->ce_lock);
+
+       if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+               return -ENOSPC;
+
+       desc->addr = __cpu_to_le64(paddr);
+       desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);
+
+       desc->nbytes = 0;
+
+       dest_ring->per_transfer_context[write_index] = ctx;
+       write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+       ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+       dest_ring->write_index = write_index;
+
+       return 0;
+}
+
 void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
 {
        struct ath10k *ar = pipe->ar;
@@ -508,14 +623,15 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
        dest_ring->write_index = write_index;
 }
 
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+                         dma_addr_t paddr)
 {
        struct ath10k *ar = pipe->ar;
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        int ret;
 
        spin_lock_bh(&ce->ce_lock);
-       ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
+       ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
        spin_unlock_bh(&ce->ce_lock);
 
        return ret;
@@ -525,9 +641,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
  * Guts of ath10k_ce_completed_recv_next.
  * The caller takes responsibility for any necessary locking.
  */
-int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
-                                        void **per_transfer_contextp,
-                                        unsigned int *nbytesp)
+static int
+        _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+                                              void **per_transfer_contextp,
+                                              unsigned int *nbytesp)
 {
        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
@@ -574,6 +691,64 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
        return 0;
 }
 
+static int
+_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
+                                        void **per_transfer_contextp,
+                                        unsigned int *nbytesp)
+{
+       struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+       unsigned int nentries_mask = dest_ring->nentries_mask;
+       unsigned int sw_index = dest_ring->sw_index;
+       struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+       struct ce_desc_64 *desc =
+               CE_DEST_RING_TO_DESC_64(base, sw_index);
+       struct ce_desc_64 sdesc;
+       u16 nbytes;
+
+       /* Copy in one go for performance reasons */
+       sdesc = *desc;
+
+       nbytes = __le16_to_cpu(sdesc.nbytes);
+       if (nbytes == 0) {
+               /* This closes a relatively unusual race where the Host
+                * sees the updated DRRI before the update to the
+                * corresponding descriptor has completed. We treat this
+                * as a descriptor that is not yet done.
+                */
+               return -EIO;
+       }
+
+       desc->nbytes = 0;
+
+       /* Return data from completed destination descriptor */
+       *nbytesp = nbytes;
+
+       if (per_transfer_contextp)
+               *per_transfer_contextp =
+                       dest_ring->per_transfer_context[sw_index];
+
+       /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+        * So update transfer context for all CEs except CE5.
+        */
+       if (ce_state->id != 5)
+               dest_ring->per_transfer_context[sw_index] = NULL;
+
+       /* Update sw_index */
+       sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+       dest_ring->sw_index = sw_index;
+
+       return 0;
+}
+
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+                                        void **per_transfer_ctx,
+                                        unsigned int *nbytesp)
+{
+       return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
+                                                           per_transfer_ctx,
+                                                           nbytesp);
+}
+
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
                                  unsigned int *nbytesp)
@@ -583,17 +758,18 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
        int ret;
 
        spin_lock_bh(&ce->ce_lock);
-       ret = ath10k_ce_completed_recv_next_nolock(ce_state,
+       ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
                                                   per_transfer_contextp,
                                                   nbytesp);
+
        spin_unlock_bh(&ce->ce_lock);
 
        return ret;
 }
 
-int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
-                              void **per_transfer_contextp,
-                              u32 *bufferp)
+static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+                                      void **per_transfer_contextp,
+                                      dma_addr_t *bufferp)
 {
        struct ath10k_ce_ring *dest_ring;
        unsigned int nentries_mask;
@@ -644,6 +820,69 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
        return ret;
 }
 
+static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
+                                         void **per_transfer_contextp,
+                                         dma_addr_t *bufferp)
+{
+       struct ath10k_ce_ring *dest_ring;
+       unsigned int nentries_mask;
+       unsigned int sw_index;
+       unsigned int write_index;
+       int ret;
+       struct ath10k *ar;
+       struct ath10k_ce *ce;
+
+       dest_ring = ce_state->dest_ring;
+
+       if (!dest_ring)
+               return -EIO;
+
+       ar = ce_state->ar;
+       ce = ath10k_ce_priv(ar);
+
+       spin_lock_bh(&ce->ce_lock);
+
+       nentries_mask = dest_ring->nentries_mask;
+       sw_index = dest_ring->sw_index;
+       write_index = dest_ring->write_index;
+       if (write_index != sw_index) {
+               struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+               struct ce_desc_64 *desc =
+                       CE_DEST_RING_TO_DESC_64(base, sw_index);
+
+               /* Return data from completed destination descriptor */
+               *bufferp = __le64_to_cpu(desc->addr);
+
+               if (per_transfer_contextp)
+                       *per_transfer_contextp =
+                               dest_ring->per_transfer_context[sw_index];
+
+               /* sanity */
+               dest_ring->per_transfer_context[sw_index] = NULL;
+               desc->nbytes = 0;
+
+               /* Update sw_index */
+               sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+               dest_ring->sw_index = sw_index;
+               ret = 0;
+       } else {
+               ret = -EIO;
+       }
+
+       spin_unlock_bh(&ce->ce_lock);
+
+       return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+                              void **per_transfer_contextp,
+                              dma_addr_t *bufferp)
+{
+       return ce_state->ops->ce_revoke_recv_next(ce_state,
+                                                 per_transfer_contextp,
+                                                 bufferp);
+}
+
 /*
  * Guts of ath10k_ce_completed_send_next.
  * The caller takes responsibility for any necessary locking.
@@ -698,10 +937,45 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
        return 0;
 }
 
+static void ath10k_ce_extract_desc_data(struct ath10k *ar,
+                                       struct ath10k_ce_ring *src_ring,
+                                       u32 sw_index,
+                                       dma_addr_t *bufferp,
+                                       u32 *nbytesp,
+                                       u32 *transfer_idp)
+{
+               struct ce_desc *base = src_ring->base_addr_owner_space;
+               struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+               /* Return data from completed source descriptor */
+               *bufferp = __le32_to_cpu(desc->addr);
+               *nbytesp = __le16_to_cpu(desc->nbytes);
+               *transfer_idp = MS(__le16_to_cpu(desc->flags),
+                                  CE_DESC_FLAGS_META_DATA);
+}
+
+static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
+                                          struct ath10k_ce_ring *src_ring,
+                                          u32 sw_index,
+                                          dma_addr_t *bufferp,
+                                          u32 *nbytesp,
+                                          u32 *transfer_idp)
+{
+               struct ce_desc_64 *base = src_ring->base_addr_owner_space;
+               struct ce_desc_64 *desc =
+                       CE_SRC_RING_TO_DESC_64(base, sw_index);
+
+               /* Return data from completed source descriptor */
+               *bufferp = __le64_to_cpu(desc->addr);
+               *nbytesp = __le16_to_cpu(desc->nbytes);
+               *transfer_idp = MS(__le16_to_cpu(desc->flags),
+                                  CE_DESC_FLAGS_META_DATA);
+}
+
 /* NB: Modeled after ath10k_ce_completed_send_next */
 int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
                               void **per_transfer_contextp,
-                              u32 *bufferp,
+                              dma_addr_t *bufferp,
                               unsigned int *nbytesp,
                               unsigned int *transfer_idp)
 {
@@ -728,14 +1002,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
        write_index = src_ring->write_index;
 
        if (write_index != sw_index) {
-               struct ce_desc *base = src_ring->base_addr_owner_space;
-               struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
-
-               /* Return data from completed source descriptor */
-               *bufferp = __le32_to_cpu(desc->addr);
-               *nbytesp = __le16_to_cpu(desc->nbytes);
-               *transfer_idp = MS(__le16_to_cpu(desc->flags),
-                                               CE_DESC_FLAGS_META_DATA);
+               ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
+                                                   bufferp, nbytesp,
+                                                   transfer_idp);
 
                if (per_transfer_contextp)
                        *per_transfer_contextp =
@@ -897,8 +1166,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 
        nentries = roundup_pow_of_two(attr->src_nentries);
 
-       memset(src_ring->base_addr_owner_space, 0,
-              nentries * sizeof(struct ce_desc));
+       if (ar->hw_params.target_64bit)
+               memset(src_ring->base_addr_owner_space, 0,
+                      nentries * sizeof(struct ce_desc_64));
+       else
+               memset(src_ring->base_addr_owner_space, 0,
+                      nentries * sizeof(struct ce_desc));
 
        src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
        src_ring->sw_index &= src_ring->nentries_mask;
@@ -934,8 +1207,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 
        nentries = roundup_pow_of_two(attr->dest_nentries);
 
-       memset(dest_ring->base_addr_owner_space, 0,
-              nentries * sizeof(struct ce_desc));
+       if (ar->hw_params.target_64bit)
+               memset(dest_ring->base_addr_owner_space, 0,
+                      nentries * sizeof(struct ce_desc_64));
+       else
+               memset(dest_ring->base_addr_owner_space, 0,
+                      nentries * sizeof(struct ce_desc));
 
        dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
        dest_ring->sw_index &= dest_ring->nentries_mask;
@@ -993,12 +1270,57 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
 
        src_ring->base_addr_ce_space_unaligned = base_addr;
 
-       src_ring->base_addr_owner_space = PTR_ALIGN(
-                       src_ring->base_addr_owner_space_unaligned,
-                       CE_DESC_RING_ALIGN);
-       src_ring->base_addr_ce_space = ALIGN(
-                       src_ring->base_addr_ce_space_unaligned,
-                       CE_DESC_RING_ALIGN);
+       src_ring->base_addr_owner_space =
+                       PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+                                 CE_DESC_RING_ALIGN);
+       src_ring->base_addr_ce_space =
+                       ALIGN(src_ring->base_addr_ce_space_unaligned,
+                             CE_DESC_RING_ALIGN);
+
+       return src_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
+                           const struct ce_attr *attr)
+{
+       struct ath10k_ce_ring *src_ring;
+       u32 nentries = attr->src_nentries;
+       dma_addr_t base_addr;
+
+       nentries = roundup_pow_of_two(nentries);
+
+       src_ring = kzalloc(sizeof(*src_ring) +
+                          (nentries *
+                           sizeof(*src_ring->per_transfer_context)),
+                          GFP_KERNEL);
+       if (!src_ring)
+               return ERR_PTR(-ENOMEM);
+
+       src_ring->nentries = nentries;
+       src_ring->nentries_mask = nentries - 1;
+
+       /* Legacy platforms that do not support cache
+        * coherent DMA are unsupported
+        */
+       src_ring->base_addr_owner_space_unaligned =
+               dma_alloc_coherent(ar->dev,
+                                  (nentries * sizeof(struct ce_desc_64) +
+                                   CE_DESC_RING_ALIGN),
+                                  &base_addr, GFP_KERNEL);
+       if (!src_ring->base_addr_owner_space_unaligned) {
+               kfree(src_ring);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       src_ring->base_addr_ce_space_unaligned = base_addr;
+
+       src_ring->base_addr_owner_space =
+                       PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+                                 CE_DESC_RING_ALIGN);
+       src_ring->base_addr_ce_space =
+                       ALIGN(src_ring->base_addr_ce_space_unaligned,
+                             CE_DESC_RING_ALIGN);
 
        return src_ring;
 }
@@ -1039,12 +1361,63 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
 
        dest_ring->base_addr_ce_space_unaligned = base_addr;
 
-       dest_ring->base_addr_owner_space = PTR_ALIGN(
-                       dest_ring->base_addr_owner_space_unaligned,
-                       CE_DESC_RING_ALIGN);
-       dest_ring->base_addr_ce_space = ALIGN(
-                       dest_ring->base_addr_ce_space_unaligned,
-                       CE_DESC_RING_ALIGN);
+       dest_ring->base_addr_owner_space =
+                       PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+                                 CE_DESC_RING_ALIGN);
+       dest_ring->base_addr_ce_space =
+                               ALIGN(dest_ring->base_addr_ce_space_unaligned,
+                                     CE_DESC_RING_ALIGN);
+
+       return dest_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
+                            const struct ce_attr *attr)
+{
+       struct ath10k_ce_ring *dest_ring;
+       u32 nentries;
+       dma_addr_t base_addr;
+
+       nentries = roundup_pow_of_two(attr->dest_nentries);
+
+       dest_ring = kzalloc(sizeof(*dest_ring) +
+                           (nentries *
+                            sizeof(*dest_ring->per_transfer_context)),
+                           GFP_KERNEL);
+       if (!dest_ring)
+               return ERR_PTR(-ENOMEM);
+
+       dest_ring->nentries = nentries;
+       dest_ring->nentries_mask = nentries - 1;
+
+       /* Legacy platforms that do not support cache
+        * coherent DMA are unsupported
+        */
+       dest_ring->base_addr_owner_space_unaligned =
+               dma_alloc_coherent(ar->dev,
+                                  (nentries * sizeof(struct ce_desc_64) +
+                                   CE_DESC_RING_ALIGN),
+                                  &base_addr, GFP_KERNEL);
+       if (!dest_ring->base_addr_owner_space_unaligned) {
+               kfree(dest_ring);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+       /* Correctly initialize memory to 0 to prevent garbage
+        * data crashing system when download firmware
+        */
+       memset(dest_ring->base_addr_owner_space_unaligned, 0,
+              nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
+
+       dest_ring->base_addr_owner_space =
+                       PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+                                 CE_DESC_RING_ALIGN);
+       dest_ring->base_addr_ce_space =
+                       ALIGN(dest_ring->base_addr_ce_space_unaligned,
+                             CE_DESC_RING_ALIGN);
 
        return dest_ring;
 }
@@ -1107,65 +1480,36 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
        ath10k_ce_deinit_dest_ring(ar, ce_id);
 }
 
-int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
-                        const struct ce_attr *attr)
+static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 {
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
-       int ret;
-
-       /*
-        * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
-        * additional TX locking checks.
-        *
-        * For the lack of a better place do the check here.
-        */
-       BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
-                    (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-       BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
-                    (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-       BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
-                    (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-
-       ce_state->ar = ar;
-       ce_state->id = ce_id;
-       ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
-       ce_state->attr_flags = attr->flags;
-       ce_state->src_sz_max = attr->src_sz_max;
 
-       if (attr->src_nentries)
-               ce_state->send_cb = attr->send_cb;
-
-       if (attr->dest_nentries)
-               ce_state->recv_cb = attr->recv_cb;
-
-       if (attr->src_nentries) {
-               ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
-               if (IS_ERR(ce_state->src_ring)) {
-                       ret = PTR_ERR(ce_state->src_ring);
-                       ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
-                                  ce_id, ret);
-                       ce_state->src_ring = NULL;
-                       return ret;
-               }
+       if (ce_state->src_ring) {
+               dma_free_coherent(ar->dev,
+                                 (ce_state->src_ring->nentries *
+                                  sizeof(struct ce_desc) +
+                                  CE_DESC_RING_ALIGN),
+                                 ce_state->src_ring->base_addr_owner_space,
+                                 ce_state->src_ring->base_addr_ce_space);
+               kfree(ce_state->src_ring);
        }
 
-       if (attr->dest_nentries) {
-               ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
-                                                               attr);
-               if (IS_ERR(ce_state->dest_ring)) {
-                       ret = PTR_ERR(ce_state->dest_ring);
-                       ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
-                                  ce_id, ret);
-                       ce_state->dest_ring = NULL;
-                       return ret;
-               }
+       if (ce_state->dest_ring) {
+               dma_free_coherent(ar->dev,
+                                 (ce_state->dest_ring->nentries *
+                                  sizeof(struct ce_desc) +
+                                  CE_DESC_RING_ALIGN),
+                                 ce_state->dest_ring->base_addr_owner_space,
+                                 ce_state->dest_ring->base_addr_ce_space);
+               kfree(ce_state->dest_ring);
        }
 
-       return 0;
+       ce_state->src_ring = NULL;
+       ce_state->dest_ring = NULL;
 }
 
-void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
 {
        struct ath10k_ce *ce = ath10k_ce_priv(ar);
        struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
@@ -1173,7 +1517,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
        if (ce_state->src_ring) {
                dma_free_coherent(ar->dev,
                                  (ce_state->src_ring->nentries *
-                                  sizeof(struct ce_desc) +
+                                  sizeof(struct ce_desc_64) +
                                   CE_DESC_RING_ALIGN),
                                  ce_state->src_ring->base_addr_owner_space,
                                  ce_state->src_ring->base_addr_ce_space);
@@ -1183,7 +1527,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
        if (ce_state->dest_ring) {
                dma_free_coherent(ar->dev,
                                  (ce_state->dest_ring->nentries *
-                                  sizeof(struct ce_desc) +
+                                  sizeof(struct ce_desc_64) +
                                   CE_DESC_RING_ALIGN),
                                  ce_state->dest_ring->base_addr_owner_space,
                                  ce_state->dest_ring->base_addr_ce_space);
@@ -1194,6 +1538,14 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
        ce_state->dest_ring = NULL;
 }
 
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+       struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+
+       ce_state->ops->ce_free_pipe(ar, ce_id);
+}
+
 void ath10k_ce_dump_registers(struct ath10k *ar,
                              struct ath10k_fw_crash_data *crash_data)
 {
@@ -1232,3 +1584,99 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
 
        spin_unlock_bh(&ce->ce_lock);
 }
+
+static const struct ath10k_ce_ops ce_ops = {
+       .ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
+       .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
+       .ce_rx_post_buf = __ath10k_ce_rx_post_buf,
+       .ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
+       .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
+       .ce_extract_desc_data = ath10k_ce_extract_desc_data,
+       .ce_free_pipe = _ath10k_ce_free_pipe,
+       .ce_send_nolock = _ath10k_ce_send_nolock,
+};
+
+static const struct ath10k_ce_ops ce_64_ops = {
+       .ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
+       .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
+       .ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
+       .ce_completed_recv_next_nolock =
+                               _ath10k_ce_completed_recv_next_nolock_64,
+       .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
+       .ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
+       .ce_free_pipe = _ath10k_ce_free_pipe_64,
+       .ce_send_nolock = _ath10k_ce_send_nolock_64,
+};
+
+static void ath10k_ce_set_ops(struct ath10k *ar,
+                             struct ath10k_ce_pipe *ce_state)
+{
+       switch (ar->hw_rev) {
+       case ATH10K_HW_WCN3990:
+               ce_state->ops = &ce_64_ops;
+               break;
+       default:
+               ce_state->ops = &ce_ops;
+               break;
+       }
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+                        const struct ce_attr *attr)
+{
+       struct ath10k_ce *ce = ath10k_ce_priv(ar);
+       struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+       int ret;
+
+       ath10k_ce_set_ops(ar, ce_state);
+       /* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
+        * additional TX locking checks.
+        *
+        * For the lack of a better place do the check here.
+        */
+       BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
+                    (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+       BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
+                    (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+       BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
+                    (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
+       ce_state->ar = ar;
+       ce_state->id = ce_id;
+       ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+       ce_state->attr_flags = attr->flags;
+       ce_state->src_sz_max = attr->src_sz_max;
+
+       if (attr->src_nentries)
+               ce_state->send_cb = attr->send_cb;
+
+       if (attr->dest_nentries)
+               ce_state->recv_cb = attr->recv_cb;
+
+       if (attr->src_nentries) {
+               ce_state->src_ring =
+                       ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
+               if (IS_ERR(ce_state->src_ring)) {
+                       ret = PTR_ERR(ce_state->src_ring);
+                       ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
+                                  ce_id, ret);
+                       ce_state->src_ring = NULL;
+                       return ret;
+               }
+       }
+
+       if (attr->dest_nentries) {
+               ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
+                                                                       ce_id,
+                                                                       attr);
+               if (IS_ERR(ce_state->dest_ring)) {
+                       ret = PTR_ERR(ce_state->dest_ring);
+                       ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
+                                  ce_id, ret);
+                       ce_state->dest_ring = NULL;
+                       return ret;
+               }
+       }
+
+       return 0;
+}
index bdec794..06ac2eb 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -36,6 +36,10 @@ struct ath10k_ce_pipe;
 
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
+#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31)
+
+#define CE_DESC_FLAGS_GET_MASK         GENMASK(4, 0)
+#define CE_DESC_37BIT_ADDR_MASK                GENMASK_ULL(37, 0)
 
 /* Following desc flags are used in QCA99X0 */
 #define CE_DESC_FLAGS_HOST_INT_DIS     (1 << 2)
@@ -50,6 +54,16 @@ struct ce_desc {
        __le16 flags; /* %CE_DESC_FLAGS_ */
 };
 
+struct ce_desc_64 {
+       __le64 addr;
+       __le16 nbytes; /* length in register map */
+       __le16 flags; /* fw_metadata_high */
+       __le32 toeplitz_hash_result;
+};
+
+#define CE_DESC_SIZE sizeof(struct ce_desc)
+#define CE_DESC_SIZE_64 sizeof(struct ce_desc_64)
+
 struct ath10k_ce_ring {
        /* Number of entries in this ring; must be power of 2 */
        unsigned int nentries;
@@ -117,6 +131,7 @@ struct ath10k_ce_pipe {
        unsigned int src_sz_max;
        struct ath10k_ce_ring *src_ring;
        struct ath10k_ce_ring *dest_ring;
+       const struct ath10k_ce_ops *ops;
 };
 
 /* Copy Engine settable attributes */
@@ -160,7 +175,7 @@ struct ath10k_ce {
  */
 int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
                   void *per_transfer_send_context,
-                  u32 buffer,
+                  dma_addr_t buffer,
                   unsigned int nbytes,
                   /* 14 bits */
                   unsigned int transfer_id,
@@ -168,7 +183,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 
 int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
                          void *per_transfer_context,
-                         u32 buffer,
+                         dma_addr_t buffer,
                          unsigned int nbytes,
                          unsigned int transfer_id,
                          unsigned int flags);
@@ -180,8 +195,8 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
 /*==================Recv=======================*/
 
 int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
-int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
-int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+                         dma_addr_t paddr);
 void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
 
 /* recv flags */
@@ -222,7 +237,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
  */
 int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
                               void **per_transfer_contextp,
-                              u32 *bufferp);
+                              dma_addr_t *bufferp);
 
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
                                         void **per_transfer_contextp,
@@ -235,7 +250,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
  */
 int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
                               void **per_transfer_contextp,
-                              u32 *bufferp,
+                              dma_addr_t *bufferp,
                               unsigned int *nbytesp,
                               unsigned int *transfer_idp);
 
@@ -281,6 +296,31 @@ struct ce_attr {
        void (*recv_cb)(struct ath10k_ce_pipe *);
 };
 
+struct ath10k_ce_ops {
+       struct ath10k_ce_ring *(*ce_alloc_src_ring)(struct ath10k *ar,
+                                                   u32 ce_id,
+                                                   const struct ce_attr *attr);
+       struct ath10k_ce_ring *(*ce_alloc_dst_ring)(struct ath10k *ar,
+                                                   u32 ce_id,
+                                                   const struct ce_attr *attr);
+       int (*ce_rx_post_buf)(struct ath10k_ce_pipe *pipe, void *ctx,
+                             dma_addr_t paddr);
+       int (*ce_completed_recv_next_nolock)(struct ath10k_ce_pipe *ce_state,
+                                            void **per_transfer_contextp,
+                                            u32 *nbytesp);
+       int (*ce_revoke_recv_next)(struct ath10k_ce_pipe *ce_state,
+                                  void **per_transfer_contextp,
+                                  dma_addr_t *nbytesp);
+       void (*ce_extract_desc_data)(struct ath10k *ar,
+                                    struct ath10k_ce_ring *src_ring,
+                                    u32 sw_index, dma_addr_t *bufferp,
+                                    u32 *nbytesp, u32 *transfer_idp);
+       void (*ce_free_pipe)(struct ath10k *ar, int ce_id);
+       int (*ce_send_nolock)(struct ath10k_ce_pipe *pipe,
+                             void *per_transfer_context,
+                             dma_addr_t buffer, u32 nbytes,
+                             u32 transfer_id, u32 flags);
+};
 static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 {
        return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
@@ -292,6 +332,12 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 #define CE_DEST_RING_TO_DESC(baddr, idx) \
        (&(((struct ce_desc *)baddr)[idx]))
 
+#define CE_SRC_RING_TO_DESC_64(baddr, idx) \
+       (&(((struct ce_desc_64 *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC_64(baddr, idx) \
+       (&(((struct ce_desc_64 *)baddr)[idx]))
+
 /* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
 #define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
        (((int)(toidx) - (int)(fromidx)) & (nentries_mask))
index 6d065f8..fe9341c 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -32,6 +32,7 @@
 #include "htt.h"
 #include "testmode.h"
 #include "wmi-ops.h"
+#include "coredump.h"
 
 unsigned int ath10k_debug_mask;
 static unsigned int ath10k_cryptmode_param;
@@ -39,17 +40,25 @@ static bool uart_print;
 static bool skip_otp;
 static bool rawmode;
 
+/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA
+ * by default.
+ */
+unsigned long ath10k_coredump_mask = 0x3;
+
+/* FIXME: most of these should be readonly */
 module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
 module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
 module_param(uart_print, bool, 0644);
 module_param(skip_otp, bool, 0644);
 module_param(rawmode, bool, 0644);
+module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
 
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 MODULE_PARM_DESC(uart_print, "Uart target debugging");
 MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
 MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
 MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
+MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
 
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
        {
@@ -78,6 +87,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = QCA9887_HW_1_0_VERSION,
@@ -105,6 +116,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = QCA6174_HW_2_1_VERSION,
@@ -131,6 +144,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = QCA6174_HW_2_1_VERSION,
@@ -157,6 +172,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = QCA6174_HW_3_0_VERSION,
@@ -183,6 +200,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = QCA6174_HW_3_2_VERSION,
@@ -212,6 +231,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -244,6 +265,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = QCA9984_HW_1_0_DEV_VERSION,
@@ -281,6 +304,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = QCA9888_HW_2_0_DEV_VERSION,
@@ -317,6 +342,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = QCA9377_HW_1_0_DEV_VERSION,
@@ -343,6 +370,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = QCA9377_HW_1_1_DEV_VERSION,
@@ -371,6 +400,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = QCA4019_HW_1_0_DEV_VERSION,
@@ -404,6 +435,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_TLV_NUM_PEERS,
                .ast_skid_limit = 0x10,
                .num_wds_entries = 0x20,
+               .target_64bit = false,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
        },
        {
                .id = WCN3990_HW_1_0_DEV_VERSION,
@@ -422,6 +455,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .num_peers = TARGET_HL_10_TLV_NUM_PEERS,
                .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
                .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
+               .target_64bit = true,
+               .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
        },
 };
 
@@ -445,6 +480,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
        [ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
        [ATH10K_FW_FEATURE_NO_PS] = "no-ps",
        [ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
+       [ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
 };
 
 static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -1524,8 +1560,8 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
                data += ie_len;
        }
 
-       if (!fw_file->firmware_data ||
-           !fw_file->firmware_len) {
+       if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features) &&
+           (!fw_file->firmware_data || !fw_file->firmware_len)) {
                ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
                            ar->hw_params.fw.dir, name);
                ret = -ENOMEDIUM;
@@ -1551,6 +1587,7 @@ static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
                break;
        case ATH10K_BUS_PCI:
        case ATH10K_BUS_AHB:
+       case ATH10K_BUS_SNOC:
                scnprintf(fw_name, fw_name_len, "%s-%d.bin",
                          ATH10K_FW_FILE_BASE, fw_api);
                break;
@@ -1836,7 +1873,7 @@ static void ath10k_core_restart(struct work_struct *work)
 
        mutex_unlock(&ar->conf_mutex);
 
-       ret = ath10k_debug_fw_devcoredump(ar);
+       ret = ath10k_coredump_submit(ar);
        if (ret)
                ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
                            ret);
@@ -2078,43 +2115,47 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
 
        ar->running_fw = fw;
 
-       ath10k_bmi_start(ar);
-
-       if (ath10k_init_configure_target(ar)) {
-               status = -EINVAL;
-               goto err;
-       }
+       if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+                     ar->running_fw->fw_file.fw_features)) {
+               ath10k_bmi_start(ar);
 
-       status = ath10k_download_cal_data(ar);
-       if (status)
-               goto err;
+               if (ath10k_init_configure_target(ar)) {
+                       status = -EINVAL;
+                       goto err;
+               }
 
-       /* Some of of qca988x solutions are having global reset issue
-        * during target initialization. Bypassing PLL setting before
-        * downloading firmware and letting the SoC run on REF_CLK is
-        * fixing the problem. Corresponding firmware change is also needed
-        * to set the clock source once the target is initialized.
-        */
-       if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
-                    ar->running_fw->fw_file.fw_features)) {
-               status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
-               if (status) {
-                       ath10k_err(ar, "could not write to skip_clock_init: %d\n",
-                                  status);
+               status = ath10k_download_cal_data(ar);
+               if (status)
                        goto err;
+
+               /* Some of of qca988x solutions are having global reset issue
+                * during target initialization. Bypassing PLL setting before
+                * downloading firmware and letting the SoC run on REF_CLK is
+                * fixing the problem. Corresponding firmware change is also
+                * needed to set the clock source once the target is
+                * initialized.
+                */
+               if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
+                            ar->running_fw->fw_file.fw_features)) {
+                       status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
+                       if (status) {
+                               ath10k_err(ar, "could not write to skip_clock_init: %d\n",
+                                          status);
+                               goto err;
+                       }
                }
-       }
 
-       status = ath10k_download_fw(ar);
-       if (status)
-               goto err;
+               status = ath10k_download_fw(ar);
+               if (status)
+                       goto err;
 
-       status = ath10k_init_uart(ar);
-       if (status)
-               goto err;
+               status = ath10k_init_uart(ar);
+               if (status)
+                       goto err;
 
-       if (ar->hif.bus == ATH10K_BUS_SDIO)
-               ath10k_init_sdio(ar);
+               if (ar->hif.bus == ATH10K_BUS_SDIO)
+                       ath10k_init_sdio(ar);
+       }
 
        ar->htc.htc_ops.target_send_suspend_complete =
                ath10k_send_suspend_complete;
@@ -2125,9 +2166,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
                goto err;
        }
 
-       status = ath10k_bmi_done(ar);
-       if (status)
-               goto err;
+       if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+                     ar->running_fw->fw_file.fw_features)) {
+               status = ath10k_bmi_done(ar);
+               if (status)
+                       goto err;
+       }
 
        status = ath10k_wmi_attach(ar);
        if (status) {
@@ -2370,19 +2414,35 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                return ret;
        }
 
-       memset(&target_info, 0, sizeof(target_info));
-       if (ar->hif.bus == ATH10K_BUS_SDIO)
+       switch (ar->hif.bus) {
+       case ATH10K_BUS_SDIO:
+               memset(&target_info, 0, sizeof(target_info));
                ret = ath10k_bmi_get_target_info_sdio(ar, &target_info);
-       else
+               if (ret) {
+                       ath10k_err(ar, "could not get target info (%d)\n", ret);
+                       goto err_power_down;
+               }
+               ar->target_version = target_info.version;
+               ar->hw->wiphy->hw_version = target_info.version;
+               break;
+       case ATH10K_BUS_PCI:
+       case ATH10K_BUS_AHB:
+       case ATH10K_BUS_USB:
+               memset(&target_info, 0, sizeof(target_info));
                ret = ath10k_bmi_get_target_info(ar, &target_info);
-       if (ret) {
-               ath10k_err(ar, "could not get target info (%d)\n", ret);
-               goto err_power_down;
+               if (ret) {
+                       ath10k_err(ar, "could not get target info (%d)\n", ret);
+                       goto err_power_down;
+               }
+               ar->target_version = target_info.version;
+               ar->hw->wiphy->hw_version = target_info.version;
+               break;
+       case ATH10K_BUS_SNOC:
+               break;
+       default:
+               ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus);
        }
 
-       ar->target_version = target_info.version;
-       ar->hw->wiphy->hw_version = target_info.version;
-
        ret = ath10k_init_hw_params(ar);
        if (ret) {
                ath10k_err(ar, "could not get hw params (%d)\n", ret);
@@ -2402,37 +2462,40 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
 
        ath10k_debug_print_hwfw_info(ar);
 
-       ret = ath10k_core_pre_cal_download(ar);
-       if (ret) {
-               /* pre calibration data download is not necessary
-                * for all the chipsets. Ignore failures and continue.
-                */
-               ath10k_dbg(ar, ATH10K_DBG_BOOT,
-                          "could not load pre cal data: %d\n", ret);
-       }
+       if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+                     ar->normal_mode_fw.fw_file.fw_features)) {
+               ret = ath10k_core_pre_cal_download(ar);
+               if (ret) {
+                       /* pre calibration data download is not necessary
+                        * for all the chipsets. Ignore failures and continue.
+                        */
+                       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                                  "could not load pre cal data: %d\n", ret);
+               }
 
-       ret = ath10k_core_get_board_id_from_otp(ar);
-       if (ret && ret != -EOPNOTSUPP) {
-               ath10k_err(ar, "failed to get board id from otp: %d\n",
-                          ret);
-               goto err_free_firmware_files;
-       }
+               ret = ath10k_core_get_board_id_from_otp(ar);
+               if (ret && ret != -EOPNOTSUPP) {
+                       ath10k_err(ar, "failed to get board id from otp: %d\n",
+                                  ret);
+                       goto err_free_firmware_files;
+               }
 
-       ret = ath10k_core_check_smbios(ar);
-       if (ret)
-               ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
+               ret = ath10k_core_check_smbios(ar);
+               if (ret)
+                       ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
 
-       ret = ath10k_core_check_dt(ar);
-       if (ret)
-               ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
+               ret = ath10k_core_check_dt(ar);
+               if (ret)
+                       ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
 
-       ret = ath10k_core_fetch_board_file(ar);
-       if (ret) {
-               ath10k_err(ar, "failed to fetch board file: %d\n", ret);
-               goto err_free_firmware_files;
-       }
+               ret = ath10k_core_fetch_board_file(ar);
+               if (ret) {
+                       ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+                       goto err_free_firmware_files;
+               }
 
-       ath10k_debug_print_board_info(ar);
+               ath10k_debug_print_board_info(ar);
+       }
 
        ret = ath10k_core_init_firmware_features(ar);
        if (ret) {
@@ -2441,11 +2504,15 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                goto err_free_firmware_files;
        }
 
-       ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
-       if (ret) {
-               ath10k_err(ar, "failed to initialize code swap segment: %d\n",
-                          ret);
-               goto err_free_firmware_files;
+       if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+                     ar->normal_mode_fw.fw_file.fw_features)) {
+               ret = ath10k_swap_code_seg_init(ar,
+                                               &ar->normal_mode_fw.fw_file);
+               if (ret) {
+                       ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+                                  ret);
+                       goto err_free_firmware_files;
+               }
        }
 
        mutex_lock(&ar->conf_mutex);
@@ -2497,10 +2564,16 @@ static void ath10k_core_register_work(struct work_struct *work)
                goto err_release_fw;
        }
 
+       status = ath10k_coredump_register(ar);
+       if (status) {
+               ath10k_err(ar, "unable to register coredump\n");
+               goto err_unregister_mac;
+       }
+
        status = ath10k_debug_register(ar);
        if (status) {
                ath10k_err(ar, "unable to initialize debugfs\n");
-               goto err_unregister_mac;
+               goto err_unregister_coredump;
        }
 
        status = ath10k_spectral_create(ar);
@@ -2523,6 +2596,8 @@ err_spectral_destroy:
        ath10k_spectral_destroy(ar);
 err_debug_destroy:
        ath10k_debug_destroy(ar);
+err_unregister_coredump:
+       ath10k_coredump_unregister(ar);
 err_unregister_mac:
        ath10k_mac_unregister(ar);
 err_release_fw:
@@ -2677,12 +2752,19 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 
        init_dummy_netdev(&ar->napi_dev);
 
-       ret = ath10k_debug_create(ar);
+       ret = ath10k_coredump_create(ar);
        if (ret)
                goto err_free_aux_wq;
 
+       ret = ath10k_debug_create(ar);
+       if (ret)
+               goto err_free_coredump;
+
        return ar;
 
+err_free_coredump:
+       ath10k_coredump_destroy(ar);
+
 err_free_aux_wq:
        destroy_workqueue(ar->workqueue_aux);
 err_free_wq:
@@ -2704,6 +2786,7 @@ void ath10k_core_destroy(struct ath10k *ar)
        destroy_workqueue(ar->workqueue_aux);
 
        ath10k_debug_destroy(ar);
+       ath10k_coredump_destroy(ar);
        ath10k_htt_tx_destroy(&ar->htt);
        ath10k_wmi_free_host_mem(ar);
        ath10k_mac_destroy(ar);
index 631df21..fe6b303 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -92,6 +92,7 @@ enum ath10k_bus {
        ATH10K_BUS_AHB,
        ATH10K_BUS_SDIO,
        ATH10K_BUS_USB,
+       ATH10K_BUS_SNOC,
 };
 
 static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -105,6 +106,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus)
                return "sdio";
        case ATH10K_BUS_USB:
                return "usb";
+       case ATH10K_BUS_SNOC:
+               return "snoc";
        }
 
        return "unknown";
@@ -457,14 +460,17 @@ struct ath10k_ce_crash_hdr {
        struct ath10k_ce_crash_data entries[];
 };
 
+#define MAX_MEM_DUMP_TYPE      5
+
 /* used for crash-dump storage, protected by data-lock */
 struct ath10k_fw_crash_data {
-       bool crashed_since_read;
-
        guid_t guid;
        struct timespec64 timestamp;
        __le32 registers[REG_DUMP_COUNT_QCA988X];
        struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
+
+       u8 *ramdump_buf;
+       size_t ramdump_buf_len;
 };
 
 struct ath10k_debug {
@@ -490,8 +496,6 @@ struct ath10k_debug {
        u32 reg_addr;
        u32 nf_cal_period;
        void *cal_data;
-
-       struct ath10k_fw_crash_data *fw_crash_data;
 };
 
 enum ath10k_state {
@@ -616,6 +620,9 @@ enum ath10k_fw_features {
        /* Firmware allows management tx by reference instead of by value. */
        ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18,
 
+       /* Firmware load is done externally, not by bmi */
+       ATH10K_FW_FEATURE_NON_BMI = 19,
+
        /* keep last */
        ATH10K_FW_FEATURE_COUNT,
 };
@@ -965,6 +972,13 @@ struct ath10k {
 #endif
 
        u32 pktlog_filter;
+
+#ifdef CONFIG_DEV_COREDUMP
+       struct {
+               struct ath10k_fw_crash_data *fw_crash_data;
+       } coredump;
+#endif
+
        struct {
                /* protected by conf_mutex */
                struct ath10k_fw_components utf_mode_fw;
@@ -1018,6 +1032,8 @@ static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
        return false;
 }
 
+extern unsigned long ath10k_coredump_mask;
+
 struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
                                  enum ath10k_bus bus,
                                  enum ath10k_hw_rev hw_rev,
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
new file mode 100644 (file)
index 0000000..4dde126
--- /dev/null
@@ -0,0 +1,993 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "coredump.h"
+
+#include <linux/devcoredump.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+
+#include "debug.h"
+#include "hw.h"
+
+static const struct ath10k_mem_section qca6174_hw21_register_sections[] = {
+       {0x800, 0x810},
+       {0x820, 0x82C},
+       {0x830, 0x8F4},
+       {0x90C, 0x91C},
+       {0xA14, 0xA18},
+       {0xA84, 0xA94},
+       {0xAA8, 0xAD4},
+       {0xADC, 0xB40},
+       {0x1000, 0x10A4},
+       {0x10BC, 0x111C},
+       {0x1134, 0x1138},
+       {0x1144, 0x114C},
+       {0x1150, 0x115C},
+       {0x1160, 0x1178},
+       {0x1240, 0x1260},
+       {0x2000, 0x207C},
+       {0x3000, 0x3014},
+       {0x4000, 0x4014},
+       {0x5000, 0x5124},
+       {0x6000, 0x6040},
+       {0x6080, 0x60CC},
+       {0x6100, 0x611C},
+       {0x6140, 0x61D8},
+       {0x6200, 0x6238},
+       {0x6240, 0x628C},
+       {0x62C0, 0x62EC},
+       {0x6380, 0x63E8},
+       {0x6400, 0x6440},
+       {0x6480, 0x64CC},
+       {0x6500, 0x651C},
+       {0x6540, 0x6580},
+       {0x6600, 0x6638},
+       {0x6640, 0x668C},
+       {0x66C0, 0x66EC},
+       {0x6780, 0x67E8},
+       {0x7080, 0x708C},
+       {0x70C0, 0x70C8},
+       {0x7400, 0x741C},
+       {0x7440, 0x7454},
+       {0x7800, 0x7818},
+       {0x8000, 0x8004},
+       {0x8010, 0x8064},
+       {0x8080, 0x8084},
+       {0x80A0, 0x80A4},
+       {0x80C0, 0x80C4},
+       {0x80E0, 0x80F4},
+       {0x8100, 0x8104},
+       {0x8110, 0x812C},
+       {0x9000, 0x9004},
+       {0x9800, 0x982C},
+       {0x9830, 0x9838},
+       {0x9840, 0x986C},
+       {0x9870, 0x9898},
+       {0x9A00, 0x9C00},
+       {0xD580, 0xD59C},
+       {0xF000, 0xF0E0},
+       {0xF140, 0xF190},
+       {0xF250, 0xF25C},
+       {0xF260, 0xF268},
+       {0xF26C, 0xF2A8},
+       {0x10008, 0x1000C},
+       {0x10014, 0x10018},
+       {0x1001C, 0x10020},
+       {0x10024, 0x10028},
+       {0x10030, 0x10034},
+       {0x10040, 0x10054},
+       {0x10058, 0x1007C},
+       {0x10080, 0x100C4},
+       {0x100C8, 0x10114},
+       {0x1012C, 0x10130},
+       {0x10138, 0x10144},
+       {0x10200, 0x10220},
+       {0x10230, 0x10250},
+       {0x10260, 0x10280},
+       {0x10290, 0x102B0},
+       {0x102C0, 0x102DC},
+       {0x102E0, 0x102F4},
+       {0x102FC, 0x1037C},
+       {0x10380, 0x10390},
+       {0x10800, 0x10828},
+       {0x10840, 0x10844},
+       {0x10880, 0x10884},
+       {0x108C0, 0x108E8},
+       {0x10900, 0x10928},
+       {0x10940, 0x10944},
+       {0x10980, 0x10984},
+       {0x109C0, 0x109E8},
+       {0x10A00, 0x10A28},
+       {0x10A40, 0x10A50},
+       {0x11000, 0x11028},
+       {0x11030, 0x11034},
+       {0x11038, 0x11068},
+       {0x11070, 0x11074},
+       {0x11078, 0x110A8},
+       {0x110B0, 0x110B4},
+       {0x110B8, 0x110E8},
+       {0x110F0, 0x110F4},
+       {0x110F8, 0x11128},
+       {0x11138, 0x11144},
+       {0x11178, 0x11180},
+       {0x111B8, 0x111C0},
+       {0x111F8, 0x11200},
+       {0x11238, 0x1123C},
+       {0x11270, 0x11274},
+       {0x11278, 0x1127C},
+       {0x112B0, 0x112B4},
+       {0x112B8, 0x112BC},
+       {0x112F0, 0x112F4},
+       {0x112F8, 0x112FC},
+       {0x11338, 0x1133C},
+       {0x11378, 0x1137C},
+       {0x113B8, 0x113BC},
+       {0x113F8, 0x113FC},
+       {0x11438, 0x11440},
+       {0x11478, 0x11480},
+       {0x114B8, 0x114BC},
+       {0x114F8, 0x114FC},
+       {0x11538, 0x1153C},
+       {0x11578, 0x1157C},
+       {0x115B8, 0x115BC},
+       {0x115F8, 0x115FC},
+       {0x11638, 0x1163C},
+       {0x11678, 0x1167C},
+       {0x116B8, 0x116BC},
+       {0x116F8, 0x116FC},
+       {0x11738, 0x1173C},
+       {0x11778, 0x1177C},
+       {0x117B8, 0x117BC},
+       {0x117F8, 0x117FC},
+       {0x17000, 0x1701C},
+       {0x17020, 0x170AC},
+       {0x18000, 0x18050},
+       {0x18054, 0x18074},
+       {0x18080, 0x180D4},
+       {0x180DC, 0x18104},
+       {0x18108, 0x1813C},
+       {0x18144, 0x18148},
+       {0x18168, 0x18174},
+       {0x18178, 0x18180},
+       {0x181C8, 0x181E0},
+       {0x181E4, 0x181E8},
+       {0x181EC, 0x1820C},
+       {0x1825C, 0x18280},
+       {0x18284, 0x18290},
+       {0x18294, 0x182A0},
+       {0x18300, 0x18304},
+       {0x18314, 0x18320},
+       {0x18328, 0x18350},
+       {0x1835C, 0x1836C},
+       {0x18370, 0x18390},
+       {0x18398, 0x183AC},
+       {0x183BC, 0x183D8},
+       {0x183DC, 0x183F4},
+       {0x18400, 0x186F4},
+       {0x186F8, 0x1871C},
+       {0x18720, 0x18790},
+       {0x19800, 0x19830},
+       {0x19834, 0x19840},
+       {0x19880, 0x1989C},
+       {0x198A4, 0x198B0},
+       {0x198BC, 0x19900},
+       {0x19C00, 0x19C88},
+       {0x19D00, 0x19D20},
+       {0x19E00, 0x19E7C},
+       {0x19E80, 0x19E94},
+       {0x19E98, 0x19EAC},
+       {0x19EB0, 0x19EBC},
+       {0x19F70, 0x19F74},
+       {0x19F80, 0x19F8C},
+       {0x19FA0, 0x19FB4},
+       {0x19FC0, 0x19FD8},
+       {0x1A000, 0x1A200},
+       {0x1A204, 0x1A210},
+       {0x1A228, 0x1A22C},
+       {0x1A230, 0x1A248},
+       {0x1A250, 0x1A270},
+       {0x1A280, 0x1A290},
+       {0x1A2A0, 0x1A2A4},
+       {0x1A2C0, 0x1A2EC},
+       {0x1A300, 0x1A3BC},
+       {0x1A3F0, 0x1A3F4},
+       {0x1A3F8, 0x1A434},
+       {0x1A438, 0x1A444},
+       {0x1A448, 0x1A468},
+       {0x1A580, 0x1A58C},
+       {0x1A644, 0x1A654},
+       {0x1A670, 0x1A698},
+       {0x1A6AC, 0x1A6B0},
+       {0x1A6D0, 0x1A6D4},
+       {0x1A6EC, 0x1A70C},
+       {0x1A710, 0x1A738},
+       {0x1A7C0, 0x1A7D0},
+       {0x1A7D4, 0x1A7D8},
+       {0x1A7DC, 0x1A7E4},
+       {0x1A7F0, 0x1A7F8},
+       {0x1A888, 0x1A89C},
+       {0x1A8A8, 0x1A8AC},
+       {0x1A8C0, 0x1A8DC},
+       {0x1A8F0, 0x1A8FC},
+       {0x1AE04, 0x1AE08},
+       {0x1AE18, 0x1AE24},
+       {0x1AF80, 0x1AF8C},
+       {0x1AFA0, 0x1AFB4},
+       {0x1B000, 0x1B200},
+       {0x1B284, 0x1B288},
+       {0x1B2D0, 0x1B2D8},
+       {0x1B2DC, 0x1B2EC},
+       {0x1B300, 0x1B340},
+       {0x1B374, 0x1B378},
+       {0x1B380, 0x1B384},
+       {0x1B388, 0x1B38C},
+       {0x1B404, 0x1B408},
+       {0x1B420, 0x1B428},
+       {0x1B440, 0x1B444},
+       {0x1B448, 0x1B44C},
+       {0x1B450, 0x1B458},
+       {0x1B45C, 0x1B468},
+       {0x1B584, 0x1B58C},
+       {0x1B68C, 0x1B690},
+       {0x1B6AC, 0x1B6B0},
+       {0x1B7F0, 0x1B7F8},
+       {0x1C800, 0x1CC00},
+       {0x1CE00, 0x1CE04},
+       {0x1CF80, 0x1CF84},
+       {0x1D200, 0x1D800},
+       {0x1E000, 0x20014},
+       {0x20100, 0x20124},
+       {0x21400, 0x217A8},
+       {0x21800, 0x21BA8},
+       {0x21C00, 0x21FA8},
+       {0x22000, 0x223A8},
+       {0x22400, 0x227A8},
+       {0x22800, 0x22BA8},
+       {0x22C00, 0x22FA8},
+       {0x23000, 0x233A8},
+       {0x24000, 0x24034},
+       {0x26000, 0x26064},
+       {0x27000, 0x27024},
+       {0x34000, 0x3400C},
+       {0x34400, 0x3445C},
+       {0x34800, 0x3485C},
+       {0x34C00, 0x34C5C},
+       {0x35000, 0x3505C},
+       {0x35400, 0x3545C},
+       {0x35800, 0x3585C},
+       {0x35C00, 0x35C5C},
+       {0x36000, 0x3605C},
+       {0x38000, 0x38064},
+       {0x38070, 0x380E0},
+       {0x3A000, 0x3A064},
+       {0x40000, 0x400A4},
+       {0x80000, 0x8000C},
+       {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_section qca6174_hw30_register_sections[] = {
+       {0x800, 0x810},
+       {0x820, 0x82C},
+       {0x830, 0x8F4},
+       {0x90C, 0x91C},
+       {0xA14, 0xA18},
+       {0xA84, 0xA94},
+       {0xAA8, 0xAD4},
+       {0xADC, 0xB40},
+       {0x1000, 0x10A4},
+       {0x10BC, 0x111C},
+       {0x1134, 0x1138},
+       {0x1144, 0x114C},
+       {0x1150, 0x115C},
+       {0x1160, 0x1178},
+       {0x1240, 0x1260},
+       {0x2000, 0x207C},
+       {0x3000, 0x3014},
+       {0x4000, 0x4014},
+       {0x5000, 0x5124},
+       {0x6000, 0x6040},
+       {0x6080, 0x60CC},
+       {0x6100, 0x611C},
+       {0x6140, 0x61D8},
+       {0x6200, 0x6238},
+       {0x6240, 0x628C},
+       {0x62C0, 0x62EC},
+       {0x6380, 0x63E8},
+       {0x6400, 0x6440},
+       {0x6480, 0x64CC},
+       {0x6500, 0x651C},
+       {0x6540, 0x6580},
+       {0x6600, 0x6638},
+       {0x6640, 0x668C},
+       {0x66C0, 0x66EC},
+       {0x6780, 0x67E8},
+       {0x7080, 0x708C},
+       {0x70C0, 0x70C8},
+       {0x7400, 0x741C},
+       {0x7440, 0x7454},
+       {0x7800, 0x7818},
+       {0x8000, 0x8004},
+       {0x8010, 0x8064},
+       {0x8080, 0x8084},
+       {0x80A0, 0x80A4},
+       {0x80C0, 0x80C4},
+       {0x80E0, 0x80F4},
+       {0x8100, 0x8104},
+       {0x8110, 0x812C},
+       {0x9000, 0x9004},
+       {0x9800, 0x982C},
+       {0x9830, 0x9838},
+       {0x9840, 0x986C},
+       {0x9870, 0x9898},
+       {0x9A00, 0x9C00},
+       {0xD580, 0xD59C},
+       {0xF000, 0xF0E0},
+       {0xF140, 0xF190},
+       {0xF250, 0xF25C},
+       {0xF260, 0xF268},
+       {0xF26C, 0xF2A8},
+       {0x10008, 0x1000C},
+       {0x10014, 0x10018},
+       {0x1001C, 0x10020},
+       {0x10024, 0x10028},
+       {0x10030, 0x10034},
+       {0x10040, 0x10054},
+       {0x10058, 0x1007C},
+       {0x10080, 0x100C4},
+       {0x100C8, 0x10114},
+       {0x1012C, 0x10130},
+       {0x10138, 0x10144},
+       {0x10200, 0x10220},
+       {0x10230, 0x10250},
+       {0x10260, 0x10280},
+       {0x10290, 0x102B0},
+       {0x102C0, 0x102DC},
+       {0x102E0, 0x102F4},
+       {0x102FC, 0x1037C},
+       {0x10380, 0x10390},
+       {0x10800, 0x10828},
+       {0x10840, 0x10844},
+       {0x10880, 0x10884},
+       {0x108C0, 0x108E8},
+       {0x10900, 0x10928},
+       {0x10940, 0x10944},
+       {0x10980, 0x10984},
+       {0x109C0, 0x109E8},
+       {0x10A00, 0x10A28},
+       {0x10A40, 0x10A50},
+       {0x11000, 0x11028},
+       {0x11030, 0x11034},
+       {0x11038, 0x11068},
+       {0x11070, 0x11074},
+       {0x11078, 0x110A8},
+       {0x110B0, 0x110B4},
+       {0x110B8, 0x110E8},
+       {0x110F0, 0x110F4},
+       {0x110F8, 0x11128},
+       {0x11138, 0x11144},
+       {0x11178, 0x11180},
+       {0x111B8, 0x111C0},
+       {0x111F8, 0x11200},
+       {0x11238, 0x1123C},
+       {0x11270, 0x11274},
+       {0x11278, 0x1127C},
+       {0x112B0, 0x112B4},
+       {0x112B8, 0x112BC},
+       {0x112F0, 0x112F4},
+       {0x112F8, 0x112FC},
+       {0x11338, 0x1133C},
+       {0x11378, 0x1137C},
+       {0x113B8, 0x113BC},
+       {0x113F8, 0x113FC},
+       {0x11438, 0x11440},
+       {0x11478, 0x11480},
+       {0x114B8, 0x114BC},
+       {0x114F8, 0x114FC},
+       {0x11538, 0x1153C},
+       {0x11578, 0x1157C},
+       {0x115B8, 0x115BC},
+       {0x115F8, 0x115FC},
+       {0x11638, 0x1163C},
+       {0x11678, 0x1167C},
+       {0x116B8, 0x116BC},
+       {0x116F8, 0x116FC},
+       {0x11738, 0x1173C},
+       {0x11778, 0x1177C},
+       {0x117B8, 0x117BC},
+       {0x117F8, 0x117FC},
+       {0x17000, 0x1701C},
+       {0x17020, 0x170AC},
+       {0x18000, 0x18050},
+       {0x18054, 0x18074},
+       {0x18080, 0x180D4},
+       {0x180DC, 0x18104},
+       {0x18108, 0x1813C},
+       {0x18144, 0x18148},
+       {0x18168, 0x18174},
+       {0x18178, 0x18180},
+       {0x181C8, 0x181E0},
+       {0x181E4, 0x181E8},
+       {0x181EC, 0x1820C},
+       {0x1825C, 0x18280},
+       {0x18284, 0x18290},
+       {0x18294, 0x182A0},
+       {0x18300, 0x18304},
+       {0x18314, 0x18320},
+       {0x18328, 0x18350},
+       {0x1835C, 0x1836C},
+       {0x18370, 0x18390},
+       {0x18398, 0x183AC},
+       {0x183BC, 0x183D8},
+       {0x183DC, 0x183F4},
+       {0x18400, 0x186F4},
+       {0x186F8, 0x1871C},
+       {0x18720, 0x18790},
+       {0x19800, 0x19830},
+       {0x19834, 0x19840},
+       {0x19880, 0x1989C},
+       {0x198A4, 0x198B0},
+       {0x198BC, 0x19900},
+       {0x19C00, 0x19C88},
+       {0x19D00, 0x19D20},
+       {0x19E00, 0x19E7C},
+       {0x19E80, 0x19E94},
+       {0x19E98, 0x19EAC},
+       {0x19EB0, 0x19EBC},
+       {0x19F70, 0x19F74},
+       {0x19F80, 0x19F8C},
+       {0x19FA0, 0x19FB4},
+       {0x19FC0, 0x19FD8},
+       {0x1A000, 0x1A200},
+       {0x1A204, 0x1A210},
+       {0x1A228, 0x1A22C},
+       {0x1A230, 0x1A248},
+       {0x1A250, 0x1A270},
+       {0x1A280, 0x1A290},
+       {0x1A2A0, 0x1A2A4},
+       {0x1A2C0, 0x1A2EC},
+       {0x1A300, 0x1A3BC},
+       {0x1A3F0, 0x1A3F4},
+       {0x1A3F8, 0x1A434},
+       {0x1A438, 0x1A444},
+       {0x1A448, 0x1A468},
+       {0x1A580, 0x1A58C},
+       {0x1A644, 0x1A654},
+       {0x1A670, 0x1A698},
+       {0x1A6AC, 0x1A6B0},
+       {0x1A6D0, 0x1A6D4},
+       {0x1A6EC, 0x1A70C},
+       {0x1A710, 0x1A738},
+       {0x1A7C0, 0x1A7D0},
+       {0x1A7D4, 0x1A7D8},
+       {0x1A7DC, 0x1A7E4},
+       {0x1A7F0, 0x1A7F8},
+       {0x1A888, 0x1A89C},
+       {0x1A8A8, 0x1A8AC},
+       {0x1A8C0, 0x1A8DC},
+       {0x1A8F0, 0x1A8FC},
+       {0x1AE04, 0x1AE08},
+       {0x1AE18, 0x1AE24},
+       {0x1AF80, 0x1AF8C},
+       {0x1AFA0, 0x1AFB4},
+       {0x1B000, 0x1B200},
+       {0x1B284, 0x1B288},
+       {0x1B2D0, 0x1B2D8},
+       {0x1B2DC, 0x1B2EC},
+       {0x1B300, 0x1B340},
+       {0x1B374, 0x1B378},
+       {0x1B380, 0x1B384},
+       {0x1B388, 0x1B38C},
+       {0x1B404, 0x1B408},
+       {0x1B420, 0x1B428},
+       {0x1B440, 0x1B444},
+       {0x1B448, 0x1B44C},
+       {0x1B450, 0x1B458},
+       {0x1B45C, 0x1B468},
+       {0x1B584, 0x1B58C},
+       {0x1B68C, 0x1B690},
+       {0x1B6AC, 0x1B6B0},
+       {0x1B7F0, 0x1B7F8},
+       {0x1C800, 0x1CC00},
+       {0x1CE00, 0x1CE04},
+       {0x1CF80, 0x1CF84},
+       {0x1D200, 0x1D800},
+       {0x1E000, 0x20014},
+       {0x20100, 0x20124},
+       {0x21400, 0x217A8},
+       {0x21800, 0x21BA8},
+       {0x21C00, 0x21FA8},
+       {0x22000, 0x223A8},
+       {0x22400, 0x227A8},
+       {0x22800, 0x22BA8},
+       {0x22C00, 0x22FA8},
+       {0x23000, 0x233A8},
+       {0x24000, 0x24034},
+       {0x26000, 0x26064},
+       {0x27000, 0x27024},
+       {0x34000, 0x3400C},
+       {0x34400, 0x3445C},
+       {0x34800, 0x3485C},
+       {0x34C00, 0x34C5C},
+       {0x35000, 0x3505C},
+       {0x35400, 0x3545C},
+       {0x35800, 0x3585C},
+       {0x35C00, 0x35C5C},
+       {0x36000, 0x3605C},
+       {0x38000, 0x38064},
+       {0x38070, 0x380E0},
+       {0x3A000, 0x3A074},
+       {0x40000, 0x400A4},
+       {0x80000, 0x8000C},
+       {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = {
+       {
+               .type = ATH10K_MEM_REGION_TYPE_DRAM,
+               .start = 0x400000,
+               .len = 0x70000,
+               .name = "DRAM",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+       {
+               .type = ATH10K_MEM_REGION_TYPE_REG,
+
+               /* RTC_SOC_BASE_ADDRESS */
+               .start = 0x0,
+
+               /* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */
+               .len = 0x800 - 0x0,
+
+               .name = "REG_PART1",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+       {
+               .type = ATH10K_MEM_REGION_TYPE_REG,
+
+               /* STEREO_BASE_ADDRESS */
+               .start = 0x27000,
+
+               /* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */
+               .len = 0x60000 - 0x27000,
+
+               .name = "REG_PART2",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+};
+
+static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = {
+       {
+               .type = ATH10K_MEM_REGION_TYPE_DRAM,
+               .start = 0x400000,
+               .len = 0x70000,
+               .name = "DRAM",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+       {
+               .type = ATH10K_MEM_REGION_TYPE_AXI,
+               .start = 0xa0000,
+               .len = 0x18000,
+               .name = "AXI",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+       {
+               .type = ATH10K_MEM_REGION_TYPE_REG,
+               .start = 0x800,
+               .len = 0x80020 - 0x800,
+               .name = "REG_TOTAL",
+               .section_table = {
+                       .sections = qca6174_hw21_register_sections,
+                       .size = ARRAY_SIZE(qca6174_hw21_register_sections),
+               },
+       },
+};
+
+static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
+       {
+               .type = ATH10K_MEM_REGION_TYPE_DRAM,
+               .start = 0x400000,
+               .len = 0x90000,
+               .name = "DRAM",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+       {
+               .type = ATH10K_MEM_REGION_TYPE_AXI,
+               .start = 0xa0000,
+               .len = 0x18000,
+               .name = "AXI",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+       {
+               .type = ATH10K_MEM_REGION_TYPE_REG,
+               .start = 0x800,
+               .len = 0x80020 - 0x800,
+               .name = "REG_TOTAL",
+               .section_table = {
+                       .sections = qca6174_hw30_register_sections,
+                       .size = ARRAY_SIZE(qca6174_hw30_register_sections),
+               },
+       },
+
+       /* IRAM dump must be put last */
+       {
+               .type = ATH10K_MEM_REGION_TYPE_IRAM1,
+               .start = 0x00980000,
+               .len = 0x00080000,
+               .name = "IRAM1",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+       {
+               .type = ATH10K_MEM_REGION_TYPE_IRAM2,
+               .start = 0x00a00000,
+               .len = 0x00040000,
+               .name = "IRAM2",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+};
+
+static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = {
+       {
+               .type = ATH10K_MEM_REGION_TYPE_DRAM,
+               .start = 0x400000,
+               .len = 0x50000,
+               .name = "DRAM",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+       {
+               .type = ATH10K_MEM_REGION_TYPE_REG,
+               .start = 0x4000,
+               .len = 0x2000,
+               .name = "REG_PART1",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+       {
+               .type = ATH10K_MEM_REGION_TYPE_REG,
+               .start = 0x8000,
+               .len = 0x58000,
+               .name = "REG_PART2",
+               .section_table = {
+                       .sections = NULL,
+                       .size = 0,
+               },
+       },
+};
+
+static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
+       {
+               .hw_id = QCA6174_HW_1_0_VERSION,
+               .region_table = {
+                       .regions = qca6174_hw10_mem_regions,
+                       .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+               },
+       },
+       {
+               .hw_id = QCA6174_HW_1_1_VERSION,
+               .region_table = {
+                       .regions = qca6174_hw10_mem_regions,
+                       .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+               },
+       },
+       {
+               .hw_id = QCA6174_HW_1_3_VERSION,
+               .region_table = {
+                       .regions = qca6174_hw10_mem_regions,
+                       .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+               },
+       },
+       {
+               .hw_id = QCA6174_HW_2_1_VERSION,
+               .region_table = {
+                       .regions = qca6174_hw21_mem_regions,
+                       .size = ARRAY_SIZE(qca6174_hw21_mem_regions),
+               },
+       },
+       {
+               .hw_id = QCA6174_HW_3_0_VERSION,
+               .region_table = {
+                       .regions = qca6174_hw30_mem_regions,
+                       .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+               },
+       },
+       {
+               .hw_id = QCA6174_HW_3_2_VERSION,
+               .region_table = {
+                       .regions = qca6174_hw30_mem_regions,
+                       .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+               },
+       },
+       {
+               .hw_id = QCA9377_HW_1_1_DEV_VERSION,
+               .region_table = {
+                       .regions = qca6174_hw30_mem_regions,
+                       .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+               },
+       },
+       {
+               .hw_id = QCA988X_HW_2_0_VERSION,
+               .region_table = {
+                       .regions = qca988x_hw20_mem_regions,
+                       .size = ARRAY_SIZE(qca988x_hw20_mem_regions),
+               },
+       },
+};
+
+static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
+{
+       const struct ath10k_hw_mem_layout *hw;
+       const struct ath10k_mem_region *mem_region;
+       size_t size = 0;
+       int i;
+
+       hw = ath10k_coredump_get_mem_layout(ar);
+
+       if (!hw)
+               return 0;
+
+       mem_region = &hw->region_table.regions[0];
+
+       for (i = 0; i < hw->region_table.size; i++) {
+               size += mem_region->len;
+               mem_region++;
+       }
+
+       /* reserve space for the headers */
+       size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr);
+
+       /* make sure it is aligned to 16 bytes for debug message print out */
+       size = ALIGN(size, 16);
+
+       return size;
+}
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+       int i;
+
+       if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+               return NULL;
+
+       if (WARN_ON(ar->target_version == 0))
+               return NULL;
+
+       for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
+               if (ar->target_version == hw_mem_layouts[i].hw_id)
+                       return &hw_mem_layouts[i];
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
+
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+       struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       if (ath10k_coredump_mask == 0)
+               /* coredump disabled */
+               return NULL;
+
+       guid_gen(&crash_data->guid);
+       ktime_get_real_ts64(&crash_data->timestamp);
+
+       return crash_data;
+}
+EXPORT_SYMBOL(ath10k_coredump_new);
+
+static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
+{
+       struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+       struct ath10k_ce_crash_hdr *ce_hdr;
+       struct ath10k_dump_file_data *dump_data;
+       struct ath10k_tlv_dump_data *dump_tlv;
+       size_t hdr_len = sizeof(*dump_data);
+       size_t len, sofar = 0;
+       unsigned char *buf;
+
+       len = hdr_len;
+
+       if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask))
+               len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+       if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask))
+               len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+                       CE_COUNT * sizeof(ce_hdr->entries[0]);
+
+       if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+               len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+
+       sofar += hdr_len;
+
+       /* This is going to get big when we start dumping FW RAM and such,
+        * so go ahead and use vmalloc.
+        */
+       buf = vzalloc(len);
+       if (!buf)
+               return NULL;
+
+       spin_lock_bh(&ar->data_lock);
+
+       dump_data = (struct ath10k_dump_file_data *)(buf);
+       strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
+               sizeof(dump_data->df_magic));
+       dump_data->len = cpu_to_le32(len);
+
+       dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
+
+       guid_copy(&dump_data->guid, &crash_data->guid);
+       dump_data->chip_id = cpu_to_le32(ar->chip_id);
+       dump_data->bus_type = cpu_to_le32(0);
+       dump_data->target_version = cpu_to_le32(ar->target_version);
+       dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
+       dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
+       dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
+       dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
+       dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
+       dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
+       dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
+       dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
+       dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
+       dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
+
+       strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
+               sizeof(dump_data->fw_ver));
+
+       dump_data->kernel_ver_code = 0;
+       strlcpy(dump_data->kernel_ver, init_utsname()->release,
+               sizeof(dump_data->kernel_ver));
+
+       dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
+       dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
+
+       if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) {
+               dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+               dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
+               dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
+               memcpy(dump_tlv->tlv_data, &crash_data->registers,
+                      sizeof(crash_data->registers));
+               sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+       }
+
+       if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) {
+               dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+               dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
+               dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
+                                               CE_COUNT * sizeof(ce_hdr->entries[0]));
+               ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
+               ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
+               memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
+               memcpy(ce_hdr->entries, crash_data->ce_crash_data,
+                      CE_COUNT * sizeof(ce_hdr->entries[0]));
+               sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+                       CE_COUNT * sizeof(ce_hdr->entries[0]);
+       }
+
+       /* Gather ram dump */
+       if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+               dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+               dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
+               dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
+               memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+                      crash_data->ramdump_buf_len);
+               sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+       }
+
+       spin_unlock_bh(&ar->data_lock);
+
+       return dump_data;
+}
+
+int ath10k_coredump_submit(struct ath10k *ar)
+{
+       struct ath10k_dump_file_data *dump;
+
+       if (ath10k_coredump_mask == 0)
+               /* coredump disabled */
+               return 0;
+
+       dump = ath10k_coredump_build(ar);
+       if (!dump) {
+               ath10k_warn(ar, "no crash dump data found for devcoredump");
+               return -ENODATA;
+       }
+
+       dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL);
+
+       return 0;
+}
+
+int ath10k_coredump_create(struct ath10k *ar)
+{
+       if (ath10k_coredump_mask == 0)
+               /* coredump disabled */
+               return 0;
+
+       ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data));
+       if (!ar->coredump.fw_crash_data)
+               return -ENOMEM;
+
+       return 0;
+}
+
+int ath10k_coredump_register(struct ath10k *ar)
+{
+       struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+       if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+               crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+
+               crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
+               if (!crash_data->ramdump_buf)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void ath10k_coredump_unregister(struct ath10k *ar)
+{
+       struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+       vfree(crash_data->ramdump_buf);
+}
+
+void ath10k_coredump_destroy(struct ath10k *ar)
+{
+       if (ar->coredump.fw_crash_data->ramdump_buf) {
+               vfree(ar->coredump.fw_crash_data->ramdump_buf);
+               ar->coredump.fw_crash_data->ramdump_buf = NULL;
+               ar->coredump.fw_crash_data->ramdump_buf_len = 0;
+       }
+
+       vfree(ar->coredump.fw_crash_data);
+       ar->coredump.fw_crash_data = NULL;
+}
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
new file mode 100644 (file)
index 0000000..bfee130
--- /dev/null
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _COREDUMP_H_
+#define _COREDUMP_H_
+
+#include "core.h"
+
+#define ATH10K_FW_CRASH_DUMP_VERSION 1
+
+/**
+ * enum ath10k_fw_crash_dump_type - types of data in the dump file
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ */
+enum ath10k_fw_crash_dump_type {
+       ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+       ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
+
+       /* contains multiple struct ath10k_dump_ram_data_hdr */
+       ATH10K_FW_CRASH_DUMP_RAM_DATA = 2,
+
+       ATH10K_FW_CRASH_DUMP_MAX,
+};
+
+struct ath10k_tlv_dump_data {
+       /* see ath10k_fw_crash_dump_type above */
+       __le32 type;
+
+       /* in bytes */
+       __le32 tlv_len;
+
+       /* pad to 32-bit boundaries as needed */
+       u8 tlv_data[];
+} __packed;
+
+struct ath10k_dump_file_data {
+       /* dump file information */
+
+       /* "ATH10K-FW-DUMP" */
+       char df_magic[16];
+
+       __le32 len;
+
+       /* file dump version */
+       __le32 version;
+
+       /* some info we can get from ath10k struct that might help */
+
+       guid_t guid;
+
+       __le32 chip_id;
+
+       /* 0 for now, in place for later hardware */
+       __le32 bus_type;
+
+       __le32 target_version;
+       __le32 fw_version_major;
+       __le32 fw_version_minor;
+       __le32 fw_version_release;
+       __le32 fw_version_build;
+       __le32 phy_capability;
+       __le32 hw_min_tx_power;
+       __le32 hw_max_tx_power;
+       __le32 ht_cap_info;
+       __le32 vht_cap_info;
+       __le32 num_rf_chains;
+
+       /* firmware version string */
+       char fw_ver[ETHTOOL_FWVERS_LEN];
+
+       /* Kernel related information */
+
+       /* time-of-day stamp */
+       __le64 tv_sec;
+
+       /* time-of-day stamp, nano-seconds */
+       __le64 tv_nsec;
+
+       /* LINUX_VERSION_CODE */
+       __le32 kernel_ver_code;
+
+       /* VERMAGIC_STRING */
+       char kernel_ver[64];
+
+       /* room for growth w/out changing binary format */
+       u8 unused[128];
+
+       /* struct ath10k_tlv_dump_data + more */
+       u8 data[0];
+} __packed;
+
+struct ath10k_dump_ram_data_hdr {
+       /* enum ath10k_mem_region_type */
+       __le32 region_type;
+
+       __le32 start;
+
+       /* length of payload data, not including this header */
+       __le32 length;
+
+       u8 data[0];
+};
+
+/* magic number to fill the holes not copied due to sections in regions */
+#define ATH10K_MAGIC_NOT_COPIED                0xAA
+
+/* part of user space ABI */
+enum ath10k_mem_region_type {
+       ATH10K_MEM_REGION_TYPE_REG      = 1,
+       ATH10K_MEM_REGION_TYPE_DRAM     = 2,
+       ATH10K_MEM_REGION_TYPE_AXI      = 3,
+       ATH10K_MEM_REGION_TYPE_IRAM1    = 4,
+       ATH10K_MEM_REGION_TYPE_IRAM2    = 5,
+};
+
+/* Define a section of the region which should be copied. As not all parts
+ * of the memory are possible to copy, for example some of the registers can
+ * be like that, sections can be used to define what is safe to copy.
+ *
+ * To minimize the size of the array, the list must obey the format:
+ * '{start0,stop0},{start1,stop1},{start2,stop2}....' The values below must
+ * also obey 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise
+ * we may encounter errors in the dump processing.
+ */
+struct ath10k_mem_section {
+       u32 start;
+       u32 end;
+};
+
+/* One region of a memory layout. If the sections field is null entire
+ * region is copied. If sections is non-null only the areas specified in
+ * sections are copied and rest of the areas are filled with
+ * ATH10K_MAGIC_NOT_COPIED.
+ */
+struct ath10k_mem_region {
+       enum ath10k_mem_region_type type;
+       u32 start;
+       u32 len;
+
+       const char *name;
+
+       struct {
+               const struct ath10k_mem_section *sections;
+               u32 size;
+       } section_table;
+};
+
+/* Contains the memory layout of a hardware version identified with the
+ * hardware id, split into regions.
+ */
+struct ath10k_hw_mem_layout {
+       u32 hw_id;
+
+       struct {
+               const struct ath10k_mem_region *regions;
+               int size;
+       } region_table;
+};
+
+/* FIXME: where to put this? */
+extern unsigned long ath10k_coredump_mask;
+
+#ifdef CONFIG_DEV_COREDUMP
+
+int ath10k_coredump_submit(struct ath10k *ar);
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar);
+int ath10k_coredump_create(struct ath10k *ar);
+int ath10k_coredump_register(struct ath10k *ar);
+void ath10k_coredump_unregister(struct ath10k *ar);
+void ath10k_coredump_destroy(struct ath10k *ar);
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar);
+
+#else /* CONFIG_DEV_COREDUMP */
+
+static inline int ath10k_coredump_submit(struct ath10k *ar)
+{
+       return 0;
+}
+
+static inline struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+       return NULL;
+}
+
+static inline int ath10k_coredump_create(struct ath10k *ar)
+{
+       return 0;
+}
+
+static inline int ath10k_coredump_register(struct ath10k *ar)
+{
+       return 0;
+}
+
+static inline void ath10k_coredump_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_coredump_destroy(struct ath10k *ar)
+{
+}
+
+static inline const struct ath10k_hw_mem_layout *
+ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+       return NULL;
+}
+
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* _COREDUMP_H_ */
index 181fd8e..6d836a2 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
 #include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/vmalloc.h>
-#include <linux/utsname.h>
 #include <linux/crc32.h>
 #include <linux/firmware.h>
-#include <linux/devcoredump.h>
 
 #include "core.h"
 #include "debug.h"
 
 #define ATH10K_DEBUG_CAL_DATA_LEN 12064
 
-#define ATH10K_FW_CRASH_DUMP_VERSION 1
-
-/**
- * enum ath10k_fw_crash_dump_type - types of data in the dump file
- * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
- */
-enum ath10k_fw_crash_dump_type {
-       ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
-       ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
-
-       ATH10K_FW_CRASH_DUMP_MAX,
-};
-
-struct ath10k_tlv_dump_data {
-       /* see ath10k_fw_crash_dump_type above */
-       __le32 type;
-
-       /* in bytes */
-       __le32 tlv_len;
-
-       /* pad to 32-bit boundaries as needed */
-       u8 tlv_data[];
-} __packed;
-
-struct ath10k_dump_file_data {
-       /* dump file information */
-
-       /* "ATH10K-FW-DUMP" */
-       char df_magic[16];
-
-       __le32 len;
-
-       /* file dump version */
-       __le32 version;
-
-       /* some info we can get from ath10k struct that might help */
-
-       guid_t guid;
-
-       __le32 chip_id;
-
-       /* 0 for now, in place for later hardware */
-       __le32 bus_type;
-
-       __le32 target_version;
-       __le32 fw_version_major;
-       __le32 fw_version_minor;
-       __le32 fw_version_release;
-       __le32 fw_version_build;
-       __le32 phy_capability;
-       __le32 hw_min_tx_power;
-       __le32 hw_max_tx_power;
-       __le32 ht_cap_info;
-       __le32 vht_cap_info;
-       __le32 num_rf_chains;
-
-       /* firmware version string */
-       char fw_ver[ETHTOOL_FWVERS_LEN];
-
-       /* Kernel related information */
-
-       /* time-of-day stamp */
-       __le64 tv_sec;
-
-       /* time-of-day stamp, nano-seconds */
-       __le64 tv_nsec;
-
-       /* LINUX_VERSION_CODE */
-       __le32 kernel_ver_code;
-
-       /* VERMAGIC_STRING */
-       char kernel_ver[64];
-
-       /* room for growth w/out changing binary format */
-       u8 unused[128];
-
-       /* struct ath10k_tlv_dump_data + more */
-       u8 data[0];
-} __packed;
-
 void ath10k_info(struct ath10k *ar, const char *fmt, ...)
 {
        struct va_format vaf = {
@@ -711,189 +629,6 @@ static const struct file_operations fops_chip_id = {
        .llseek = default_llseek,
 };
 
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
-       struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
-
-       lockdep_assert_held(&ar->data_lock);
-
-       crash_data->crashed_since_read = true;
-       guid_gen(&crash_data->guid);
-       ktime_get_real_ts64(&crash_data->timestamp);
-
-       return crash_data;
-}
-EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
-
-static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar,
-                                                           bool mark_read)
-{
-       struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
-       struct ath10k_ce_crash_hdr *ce_hdr;
-       struct ath10k_dump_file_data *dump_data;
-       struct ath10k_tlv_dump_data *dump_tlv;
-       size_t hdr_len = sizeof(*dump_data);
-       size_t len, sofar = 0;
-       unsigned char *buf;
-
-       len = hdr_len;
-       len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
-       len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
-               CE_COUNT * sizeof(ce_hdr->entries[0]);
-
-       sofar += hdr_len;
-
-       /* This is going to get big when we start dumping FW RAM and such,
-        * so go ahead and use vmalloc.
-        */
-       buf = vzalloc(len);
-       if (!buf)
-               return NULL;
-
-       spin_lock_bh(&ar->data_lock);
-
-       if (!crash_data->crashed_since_read) {
-               spin_unlock_bh(&ar->data_lock);
-               vfree(buf);
-               return NULL;
-       }
-
-       dump_data = (struct ath10k_dump_file_data *)(buf);
-       strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
-               sizeof(dump_data->df_magic));
-       dump_data->len = cpu_to_le32(len);
-
-       dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
-
-       guid_copy(&dump_data->guid, &crash_data->guid);
-       dump_data->chip_id = cpu_to_le32(ar->chip_id);
-       dump_data->bus_type = cpu_to_le32(0);
-       dump_data->target_version = cpu_to_le32(ar->target_version);
-       dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
-       dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
-       dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
-       dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
-       dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
-       dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
-       dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
-       dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
-       dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
-       dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
-
-       strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
-               sizeof(dump_data->fw_ver));
-
-       dump_data->kernel_ver_code = 0;
-       strlcpy(dump_data->kernel_ver, init_utsname()->release,
-               sizeof(dump_data->kernel_ver));
-
-       dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
-       dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
-
-       /* Gather crash-dump */
-       dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
-       dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
-       dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
-       memcpy(dump_tlv->tlv_data, &crash_data->registers,
-              sizeof(crash_data->registers));
-       sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
-
-       dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
-       dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
-       dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
-                                       CE_COUNT * sizeof(ce_hdr->entries[0]));
-       ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
-       ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
-       memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
-       memcpy(ce_hdr->entries, crash_data->ce_crash_data,
-              CE_COUNT * sizeof(ce_hdr->entries[0]));
-       sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
-                CE_COUNT * sizeof(ce_hdr->entries[0]);
-
-       ar->debug.fw_crash_data->crashed_since_read = !mark_read;
-
-       spin_unlock_bh(&ar->data_lock);
-
-       return dump_data;
-}
-
-int ath10k_debug_fw_devcoredump(struct ath10k *ar)
-{
-       struct ath10k_dump_file_data *dump;
-       void *dump_ptr;
-       u32 dump_len;
-
-       /* To keep the dump file available also for debugfs don't mark the
-        * file read, only debugfs should do that.
-        */
-       dump = ath10k_build_dump_file(ar, false);
-       if (!dump) {
-               ath10k_warn(ar, "no crash dump data found for devcoredump");
-               return -ENODATA;
-       }
-
-       /* Make a copy of the dump file for dev_coredumpv() as during the
-        * transition period we need to own the original file. Once
-        * fw_crash_dump debugfs file is removed no need to have a copy
-        * anymore.
-        */
-       dump_len = le32_to_cpu(dump->len);
-       dump_ptr = vzalloc(dump_len);
-
-       if (!dump_ptr)
-               return -ENOMEM;
-
-       memcpy(dump_ptr, dump, dump_len);
-
-       dev_coredumpv(ar->dev, dump_ptr, dump_len, GFP_KERNEL);
-
-       return 0;
-}
-
-static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
-{
-       struct ath10k *ar = inode->i_private;
-       struct ath10k_dump_file_data *dump;
-
-       ath10k_warn(ar, "fw_crash_dump debugfs file is deprecated, please use /sys/class/devcoredump instead.");
-
-       dump = ath10k_build_dump_file(ar, true);
-       if (!dump)
-               return -ENODATA;
-
-       file->private_data = dump;
-
-       return 0;
-}
-
-static ssize_t ath10k_fw_crash_dump_read(struct file *file,
-                                        char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct ath10k_dump_file_data *dump_file = file->private_data;
-
-       return simple_read_from_buffer(user_buf, count, ppos,
-                                      dump_file,
-                                      le32_to_cpu(dump_file->len));
-}
-
-static int ath10k_fw_crash_dump_release(struct inode *inode,
-                                       struct file *file)
-{
-       vfree(file->private_data);
-
-       return 0;
-}
-
-static const struct file_operations fops_fw_crash_dump = {
-       .open = ath10k_fw_crash_dump_open,
-       .read = ath10k_fw_crash_dump_read,
-       .release = ath10k_fw_crash_dump_release,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
-};
-
 static ssize_t ath10k_reg_addr_read(struct file *file,
                                    char __user *user_buf,
                                    size_t count, loff_t *ppos)
@@ -2402,10 +2137,6 @@ static const struct file_operations fops_fw_checksums = {
 
 int ath10k_debug_create(struct ath10k *ar)
 {
-       ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
-       if (!ar->debug.fw_crash_data)
-               return -ENOMEM;
-
        ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
        if (!ar->debug.cal_data)
                return -ENOMEM;
@@ -2420,9 +2151,6 @@ int ath10k_debug_create(struct ath10k *ar)
 
 void ath10k_debug_destroy(struct ath10k *ar)
 {
-       vfree(ar->debug.fw_crash_data);
-       ar->debug.fw_crash_data = NULL;
-
        vfree(ar->debug.cal_data);
        ar->debug.cal_data = NULL;
 
@@ -2460,9 +2188,6 @@ int ath10k_debug_register(struct ath10k *ar)
        debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar,
                            &fops_simulate_fw_crash);
 
-       debugfs_create_file("fw_crash_dump", 0400, ar->debug.debugfs_phy, ar,
-                           &fops_fw_crash_dump);
-
        debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar,
                            &fops_reg_addr);
 
index 5e66299..e543088 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -42,6 +42,7 @@ enum ath10k_debug_mask {
        ATH10K_DBG_SDIO_DUMP    = 0x00020000,
        ATH10K_DBG_USB          = 0x00040000,
        ATH10K_DBG_USB_BULK     = 0x00080000,
+       ATH10K_DBG_SNOC         = 0x00100000,
        ATH10K_DBG_ANY          = 0xffffffff,
 };
 
@@ -100,13 +101,8 @@ void ath10k_debug_unregister(struct ath10k *ar);
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_debug_tpc_stats_process(struct ath10k *ar,
                                    struct ath10k_tpc_stats *tpc_stats);
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
-
 void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
 
-int ath10k_debug_fw_devcoredump(struct ath10k *ar);
-
 #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
 
 void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
@@ -173,12 +169,6 @@ static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
 {
 }
 
-static inline struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
-       return NULL;
-}
-
 static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
 {
        return 0;
@@ -189,11 +179,6 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
        return 0;
 }
 
-static inline int ath10k_debug_fw_devcoredump(struct ath10k *ar)
-{
-       return 0;
-}
-
 #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
 
 #define ath10k_debug_get_et_strings NULL
index ff96f70..b260b09 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 6679dd9..6da4e33 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index e5c80f5..492dc5b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 24663b0..a2f8814 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index cd160b1..625198d 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -207,6 +207,9 @@ int ath10k_htt_init(struct ath10k *ar)
                WARN_ON(1);
                return -EINVAL;
        }
+       ath10k_htt_set_tx_ops(htt);
+       ath10k_htt_set_rx_ops(htt);
+
        return 0;
 }
 
@@ -254,11 +257,11 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
                return status;
        }
 
-       status = ath10k_htt_send_frag_desc_bank_cfg(htt);
+       status = htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
        if (status)
                return status;
 
-       status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+       status = htt->tx_ops->htt_send_rx_ring_cfg(htt);
        if (status) {
                ath10k_warn(ar, "failed to setup rx ring: %d\n",
                            status);
index 7bd93d6..360c71b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -107,6 +107,14 @@ struct htt_msdu_ext_desc {
        struct htt_data_tx_desc_frag frags[6];
 };
 
+struct htt_msdu_ext_desc_64 {
+       __le32 tso_flag[5];
+       __le16 ip_identification;
+       u8 flags;
+       u8 reserved;
+       struct htt_data_tx_desc_frag frags[6];
+};
+
 #define        HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE         BIT(0)
 #define        HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE     BIT(1)
 #define        HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE     BIT(2)
@@ -179,6 +187,22 @@ struct htt_data_tx_desc {
        u8 prefetch[0]; /* start of frame, for FW classification engine */
 } __packed;
 
+struct htt_data_tx_desc_64 {
+       u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+       __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+       __le16 len;
+       __le16 id;
+       __le64 frags_paddr;
+       union {
+               __le32 peerid;
+               struct {
+                       __le16 peerid;
+                       __le16 freq;
+               } __packed offchan_tx;
+       } __packed;
+       u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
+
 enum htt_rx_ring_flags {
        HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
        HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
@@ -200,8 +224,11 @@ enum htt_rx_ring_flags {
 
 #define HTT_RX_RING_SIZE_MIN 128
 #define HTT_RX_RING_SIZE_MAX 2048
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
+#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
 
-struct htt_rx_ring_setup_ring {
+struct htt_rx_ring_setup_ring32 {
        __le32 fw_idx_shadow_reg_paddr;
        __le32 rx_ring_base_paddr;
        __le16 rx_ring_len; /* in 4-byte words */
@@ -222,14 +249,40 @@ struct htt_rx_ring_setup_ring {
        __le16 frag_info_offset;
 } __packed;
 
+struct htt_rx_ring_setup_ring64 {
+       __le64 fw_idx_shadow_reg_paddr;
+       __le64 rx_ring_base_paddr;
+       __le16 rx_ring_len; /* in 4-byte words */
+       __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+       __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+       __le16 fw_idx_init_val;
+
+       /* the following offsets are in 4-byte units */
+       __le16 mac80211_hdr_offset;
+       __le16 msdu_payload_offset;
+       __le16 ppdu_start_offset;
+       __le16 ppdu_end_offset;
+       __le16 mpdu_start_offset;
+       __le16 mpdu_end_offset;
+       __le16 msdu_start_offset;
+       __le16 msdu_end_offset;
+       __le16 rx_attention_offset;
+       __le16 frag_info_offset;
+} __packed;
+
 struct htt_rx_ring_setup_hdr {
        u8 num_rings; /* supported values: 1, 2 */
        __le16 rsvd0;
 } __packed;
 
-struct htt_rx_ring_setup {
+struct htt_rx_ring_setup_32 {
+       struct htt_rx_ring_setup_hdr hdr;
+       struct htt_rx_ring_setup_ring32 rings[0];
+} __packed;
+
+struct htt_rx_ring_setup_64 {
        struct htt_rx_ring_setup_hdr hdr;
-       struct htt_rx_ring_setup_ring rings[0];
+       struct htt_rx_ring_setup_ring64 rings[0];
 } __packed;
 
 /*
@@ -855,13 +908,23 @@ struct htt_rx_in_ord_msdu_desc {
        u8 reserved;
 } __packed;
 
+struct htt_rx_in_ord_msdu_desc_ext {
+       __le64 msdu_paddr;
+       __le16 msdu_len;
+       u8 fw_desc;
+       u8 reserved;
+} __packed;
+
 struct htt_rx_in_ord_ind {
        u8 info;
        __le16 peer_id;
        u8 vdev_id;
        u8 reserved;
        __le16 msdu_count;
-       struct htt_rx_in_ord_msdu_desc msdu_descs[0];
+       union {
+               struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
+               struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
+       } __packed;
 } __packed;
 
 #define HTT_RX_IN_ORD_IND_INFO_TID_MASK                0x0000001f
@@ -1351,7 +1414,7 @@ struct htt_q_state_conf {
        u8 pad[2];
 } __packed;
 
-struct htt_frag_desc_bank_cfg {
+struct htt_frag_desc_bank_cfg32 {
        u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
        u8 num_banks;
        u8 desc_size;
@@ -1360,6 +1423,15 @@ struct htt_frag_desc_bank_cfg {
        struct htt_q_state_conf q_state;
 } __packed;
 
+struct htt_frag_desc_bank_cfg64 {
+       u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+       u8 num_banks;
+       u8 desc_size;
+       __le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+       struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+       struct htt_q_state_conf q_state;
+} __packed;
+
 #define HTT_TX_Q_STATE_ENTRY_COEFFICIENT       128
 #define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK       0x3f
 #define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB                0
@@ -1531,11 +1603,13 @@ struct htt_cmd {
                struct htt_ver_req ver_req;
                struct htt_mgmt_tx_desc mgmt_tx;
                struct htt_data_tx_desc data_tx;
-               struct htt_rx_ring_setup rx_setup;
+               struct htt_rx_ring_setup_32 rx_setup_32;
+               struct htt_rx_ring_setup_64 rx_setup_64;
                struct htt_stats_req stats_req;
                struct htt_oob_sync_req oob_sync_req;
                struct htt_aggr_conf aggr_conf;
-               struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+               struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
+               struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
                struct htt_tx_fetch_resp tx_fetch_resp;
        };
 } __packed;
@@ -1593,13 +1667,20 @@ struct htt_peer_unmap_event {
        u16 peer_id;
 };
 
-struct ath10k_htt_txbuf {
+struct ath10k_htt_txbuf_32 {
        struct htt_data_tx_desc_frag frags[2];
        struct ath10k_htc_hdr htc_hdr;
        struct htt_cmd_hdr cmd_hdr;
        struct htt_data_tx_desc cmd_tx;
 } __packed;
 
+struct ath10k_htt_txbuf_64 {
+       struct htt_data_tx_desc_frag frags[2];
+       struct ath10k_htc_hdr htc_hdr;
+       struct htt_cmd_hdr cmd_hdr;
+       struct htt_data_tx_desc_64 cmd_tx;
+} __packed;
+
 struct ath10k_htt {
        struct ath10k *ar;
        enum ath10k_htc_ep_id eid;
@@ -1644,7 +1725,10 @@ struct ath10k_htt {
                 * rx buffers the host SW provides for the MAC HW to
                 * fill.
                 */
-               __le32 *paddrs_ring;
+               union {
+                       __le64 *paddrs_ring_64;
+                       __le32 *paddrs_ring_32;
+               };
 
                /*
                 * Base address of ring, as a "physical" device address
@@ -1721,12 +1805,20 @@ struct ath10k_htt {
 
        struct {
                dma_addr_t paddr;
-               struct htt_msdu_ext_desc *vaddr;
+               union {
+                       struct htt_msdu_ext_desc *vaddr_desc_32;
+                       struct htt_msdu_ext_desc_64 *vaddr_desc_64;
+               };
+               size_t size;
        } frag_desc;
 
        struct {
                dma_addr_t paddr;
-               struct ath10k_htt_txbuf *vaddr;
+               union {
+                       struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
+                       struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
+               };
+               size_t size;
        } txbuf;
 
        struct {
@@ -1741,8 +1833,29 @@ struct ath10k_htt {
        } tx_q_state;
 
        bool tx_mem_allocated;
+       const struct ath10k_htt_tx_ops *tx_ops;
+       const struct ath10k_htt_rx_ops *rx_ops;
 };
 
+struct ath10k_htt_tx_ops {
+       int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
+       int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
+       int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
+       void (*htt_free_frag_desc)(struct ath10k_htt *htt);
+       int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+                     struct sk_buff *msdu);
+       int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
+       void (*htt_free_txbuff)(struct ath10k_htt *htt);
+};
+
+struct ath10k_htt_rx_ops {
+       size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
+       void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
+       void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
+                                   int idx);
+       void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
+       void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
+};
 #define RX_HTT_HDR_STATUS_LEN 64
 
 /* This structure layout is programmed via rx ring setup
@@ -1820,8 +1933,6 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
-int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
-int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
 int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
                                u8 max_subfrms_amsdu);
@@ -1846,11 +1957,9 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
-int ath10k_htt_tx(struct ath10k_htt *htt,
-                 enum ath10k_hw_txrx_mode txmode,
-                 struct sk_buff *msdu);
 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
                                             struct sk_buff *skb);
 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
-
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
 #endif
index 620ed7d..6d96f95 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -25,9 +25,6 @@
 
 #include <linux/log2.h>
 
-#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
-#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
-
 /* when under memory pressure rx ring refill may fail and needs a retry */
 #define HTT_RX_RING_REFILL_RETRY_MS 50
 
@@ -36,7 +33,7 @@
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
 
 static struct sk_buff *
-ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
 {
        struct ath10k_skb_rxcb *rxcb;
 
@@ -84,6 +81,60 @@ static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
               htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
 }
 
+static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
+{
+       return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
+}
+
+static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
+{
+       return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
+}
+
+static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
+                                            void *vaddr)
+{
+       htt->rx_ring.paddrs_ring_32 = vaddr;
+}
+
+static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
+                                            void *vaddr)
+{
+       htt->rx_ring.paddrs_ring_64 = vaddr;
+}
+
+static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
+                                         dma_addr_t paddr, int idx)
+{
+       htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
+}
+
+static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
+                                         dma_addr_t paddr, int idx)
+{
+       htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
+}
+
+static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
+{
+       htt->rx_ring.paddrs_ring_32[idx] = 0;
+}
+
+static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
+{
+       htt->rx_ring.paddrs_ring_64[idx] = 0;
+}
+
+static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
+{
+       return (void *)htt->rx_ring.paddrs_ring_32;
+}
+
+static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
+{
+       return (void *)htt->rx_ring.paddrs_ring_64;
+}
+
 static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
 {
        struct htt_rx_desc *rx_desc;
@@ -129,13 +180,13 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
                rxcb = ATH10K_SKB_RXCB(skb);
                rxcb->paddr = paddr;
                htt->rx_ring.netbufs_ring[idx] = skb;
-               htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+               htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
                htt->rx_ring.fill_cnt++;
 
                if (htt->rx_ring.in_ord_rx) {
                        hash_add(htt->rx_ring.skb_table,
                                 &ATH10K_SKB_RXCB(skb)->hlist,
-                                (u32)paddr);
+                                paddr);
                }
 
                num--;
@@ -234,9 +285,8 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
        ath10k_htt_rx_ring_free(htt);
 
        dma_free_coherent(htt->ar->dev,
-                         (htt->rx_ring.size *
-                          sizeof(htt->rx_ring.paddrs_ring)),
-                         htt->rx_ring.paddrs_ring,
+                         htt->rx_ops->htt_get_rx_ring_size(htt),
+                         htt->rx_ops->htt_get_vaddr_ring(htt),
                          htt->rx_ring.base_paddr);
 
        dma_free_coherent(htt->ar->dev,
@@ -263,7 +313,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
        htt->rx_ring.netbufs_ring[idx] = NULL;
-       htt->rx_ring.paddrs_ring[idx] = 0;
+       htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
 
        idx++;
        idx &= htt->rx_ring.size_mask;
@@ -383,7 +433,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 }
 
 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
-                                              u32 paddr)
+                                              u64 paddr)
 {
        struct ath10k *ar = htt->ar;
        struct ath10k_skb_rxcb *rxcb;
@@ -408,12 +458,12 @@ static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
        return msdu;
 }
 
-static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
-                                       struct htt_rx_in_ord_ind *ev,
-                                       struct sk_buff_head *list)
+static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
+                                         struct htt_rx_in_ord_ind *ev,
+                                         struct sk_buff_head *list)
 {
        struct ath10k *ar = htt->ar;
-       struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
+       struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
        struct htt_rx_desc *rxd;
        struct sk_buff *msdu;
        int msdu_count;
@@ -458,11 +508,60 @@ static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
        return 0;
 }
 
+static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
+                                         struct htt_rx_in_ord_ind *ev,
+                                         struct sk_buff_head *list)
+{
+       struct ath10k *ar = htt->ar;
+       struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
+       struct htt_rx_desc *rxd;
+       struct sk_buff *msdu;
+       int msdu_count;
+       bool is_offload;
+       u64 paddr;
+
+       lockdep_assert_held(&htt->rx_ring.lock);
+
+       msdu_count = __le16_to_cpu(ev->msdu_count);
+       is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+       while (msdu_count--) {
+               paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
+               msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+               if (!msdu) {
+                       __skb_queue_purge(list);
+                       return -ENOENT;
+               }
+
+               __skb_queue_tail(list, msdu);
+
+               if (!is_offload) {
+                       rxd = (void *)msdu->data;
+
+                       trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+                       skb_put(msdu, sizeof(*rxd));
+                       skb_pull(msdu, sizeof(*rxd));
+                       skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+                       if (!(__le32_to_cpu(rxd->attention.flags) &
+                             RX_ATTENTION_FLAGS_MSDU_DONE)) {
+                               ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+                               return -EIO;
+                       }
+               }
+
+               msdu_desc++;
+       }
+
+       return 0;
+}
+
 int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
        dma_addr_t paddr;
-       void *vaddr;
+       void *vaddr, *vaddr_ring;
        size_t size;
        struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
 
@@ -473,7 +572,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
         */
        htt->rx_ring.size = HTT_RX_RING_SIZE;
        htt->rx_ring.size_mask = htt->rx_ring.size - 1;
-       htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
+       htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
 
        if (!is_power_of_2(htt->rx_ring.size)) {
                ath10k_warn(ar, "htt rx ring size is not power of 2\n");
@@ -486,13 +585,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
        if (!htt->rx_ring.netbufs_ring)
                goto err_netbuf;
 
-       size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
+       size = htt->rx_ops->htt_get_rx_ring_size(htt);
 
-       vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
-       if (!vaddr)
+       vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
+       if (!vaddr_ring)
                goto err_dma_ring;
 
-       htt->rx_ring.paddrs_ring = vaddr;
+       htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
        htt->rx_ring.base_paddr = paddr;
 
        vaddr = dma_alloc_coherent(htt->ar->dev,
@@ -526,9 +625,8 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 
 err_dma_idx:
        dma_free_coherent(htt->ar->dev,
-                         (htt->rx_ring.size *
-                          sizeof(htt->rx_ring.paddrs_ring)),
-                         htt->rx_ring.paddrs_ring,
+                         htt->rx_ops->htt_get_rx_ring_size(htt),
+                         vaddr_ring,
                          htt->rx_ring.base_paddr);
 err_dma_ring:
        kfree(htt->rx_ring.netbufs_ring);
@@ -1986,7 +2084,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
                   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
                   vdev_id, peer_id, tid, offload, frag, msdu_count);
 
-       if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
+       if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
                ath10k_warn(ar, "dropping invalid in order rx indication\n");
                return -EINVAL;
        }
@@ -1995,7 +2093,13 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
         * extracted and processed.
         */
        __skb_queue_head_init(&list);
-       ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
+       if (ar->hw_params.target_64bit)
+               ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
+                                                    &list);
+       else
+               ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
+                                                    &list);
+
        if (ret < 0) {
                ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
                htt->rx_confused = true;
@@ -2795,3 +2899,29 @@ exit:
        return done;
 }
 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
+       .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
+       .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
+       .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
+       .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
+       .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
+};
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
+       .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
+       .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
+       .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
+       .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
+       .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
+};
+
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
+{
+       struct ath10k *ar = htt->ar;
+
+       if (ar->hw_params.target_64bit)
+               htt->rx_ops = &htt_rx_ops_64;
+       else
+               htt->rx_ops = &htt_rx_ops_32;
+}
index 685faac..d334b7b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -229,50 +229,91 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
        idr_remove(&htt->pending_tx, msdu_id);
 }
 
-static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
        size_t size;
 
-       if (!htt->txbuf.vaddr)
+       if (!htt->txbuf.vaddr_txbuff_32)
                return;
 
-       size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
-       dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
-       htt->txbuf.vaddr = NULL;
+       size = htt->txbuf.size;
+       dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
+                         htt->txbuf.paddr);
+       htt->txbuf.vaddr_txbuff_32 = NULL;
 }
 
-static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
+static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
        size_t size;
 
-       size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
-       htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr,
-                                             GFP_KERNEL);
-       if (!htt->txbuf.vaddr)
+       size = htt->max_num_pending_tx *
+                       sizeof(struct ath10k_htt_txbuf_32);
+
+       htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
+                                                       &htt->txbuf.paddr,
+                                                       GFP_KERNEL);
+       if (!htt->txbuf.vaddr_txbuff_32)
                return -ENOMEM;
 
+       htt->txbuf.size = size;
+
        return 0;
 }
 
-static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
+static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
 {
+       struct ath10k *ar = htt->ar;
        size_t size;
 
-       if (!htt->frag_desc.vaddr)
+       if (!htt->txbuf.vaddr_txbuff_64)
                return;
 
-       size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+       size = htt->txbuf.size;
+       dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
+                         htt->txbuf.paddr);
+       htt->txbuf.vaddr_txbuff_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
+{
+       struct ath10k *ar = htt->ar;
+       size_t size;
+
+       size = htt->max_num_pending_tx *
+                       sizeof(struct ath10k_htt_txbuf_64);
+
+       htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
+                                                       &htt->txbuf.paddr,
+                                                       GFP_KERNEL);
+       if (!htt->txbuf.vaddr_txbuff_64)
+               return -ENOMEM;
+
+       htt->txbuf.size = size;
+
+       return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
+{
+       size_t size;
+
+       if (!htt->frag_desc.vaddr_desc_32)
+               return;
+
+       size = htt->max_num_pending_tx *
+                       sizeof(struct htt_msdu_ext_desc);
 
        dma_free_coherent(htt->ar->dev,
                          size,
-                         htt->frag_desc.vaddr,
+                         htt->frag_desc.vaddr_desc_32,
                          htt->frag_desc.paddr);
-       htt->frag_desc.vaddr = NULL;
+
+       htt->frag_desc.vaddr_desc_32 = NULL;
 }
 
-static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
+static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
        size_t size;
@@ -280,12 +321,57 @@ static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
        if (!ar->hw_params.continuous_frag_desc)
                return 0;
 
-       size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
-       htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
-                                                 &htt->frag_desc.paddr,
-                                                 GFP_KERNEL);
-       if (!htt->frag_desc.vaddr)
+       size = htt->max_num_pending_tx *
+                       sizeof(struct htt_msdu_ext_desc);
+       htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
+                                                         &htt->frag_desc.paddr,
+                                                         GFP_KERNEL);
+       if (!htt->frag_desc.vaddr_desc_32) {
+               ath10k_err(ar, "failed to alloc fragment desc memory\n");
                return -ENOMEM;
+       }
+       htt->frag_desc.size = size;
+
+       return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+       size_t size;
+
+       if (!htt->frag_desc.vaddr_desc_64)
+               return;
+
+       size = htt->max_num_pending_tx *
+                       sizeof(struct htt_msdu_ext_desc_64);
+
+       dma_free_coherent(htt->ar->dev,
+                         size,
+                         htt->frag_desc.vaddr_desc_64,
+                         htt->frag_desc.paddr);
+
+       htt->frag_desc.vaddr_desc_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+       struct ath10k *ar = htt->ar;
+       size_t size;
+
+       if (!ar->hw_params.continuous_frag_desc)
+               return 0;
+
+       size = htt->max_num_pending_tx *
+                       sizeof(struct htt_msdu_ext_desc_64);
+
+       htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
+                                                         &htt->frag_desc.paddr,
+                                                         GFP_KERNEL);
+       if (!htt->frag_desc.vaddr_desc_64) {
+               ath10k_err(ar, "failed to alloc fragment desc memory\n");
+               return -ENOMEM;
+       }
+       htt->frag_desc.size = size;
 
        return 0;
 }
@@ -357,13 +443,13 @@ static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
        struct ath10k *ar = htt->ar;
        int ret;
 
-       ret = ath10k_htt_tx_alloc_cont_txbuf(htt);
+       ret = htt->tx_ops->htt_alloc_txbuff(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
                return ret;
        }
 
-       ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
+       ret = htt->tx_ops->htt_alloc_frag_desc(htt);
        if (ret) {
                ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
                goto free_txbuf;
@@ -387,10 +473,10 @@ free_txq:
        ath10k_htt_tx_free_txq(htt);
 
 free_frag_desc:
-       ath10k_htt_tx_free_cont_frag_desc(htt);
+       htt->tx_ops->htt_free_frag_desc(htt);
 
 free_txbuf:
-       ath10k_htt_tx_free_cont_txbuf(htt);
+       htt->tx_ops->htt_free_txbuff(htt);
 
        return ret;
 }
@@ -444,9 +530,9 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
        if (!htt->tx_mem_allocated)
                return;
 
-       ath10k_htt_tx_free_cont_txbuf(htt);
+       htt->tx_ops->htt_free_txbuff(htt);
        ath10k_htt_tx_free_txq(htt);
-       ath10k_htt_tx_free_cont_frag_desc(htt);
+       htt->tx_ops->htt_free_frag_desc(htt);
        ath10k_htt_tx_free_txdone_fifo(htt);
        htt->tx_mem_allocated = false;
 }
@@ -545,12 +631,12 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
        return 0;
 }
 
-int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
-       struct htt_frag_desc_bank_cfg *cfg;
+       struct htt_frag_desc_bank_cfg32 *cfg;
        int ret, size;
        u8 info;
 
@@ -562,7 +648,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
                return -EINVAL;
        }
 
-       size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+       size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;
@@ -579,7 +665,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
                     ar->running_fw->fw_file.fw_features))
                info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
 
-       cfg = &cmd->frag_desc_bank_cfg;
+       cfg = &cmd->frag_desc_bank_cfg32;
        cfg->info = info;
        cfg->num_banks = 1;
        cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
@@ -607,12 +693,112 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
        return 0;
 }
 
-int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
+static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
-       struct htt_rx_ring_setup_ring *ring;
+       struct htt_frag_desc_bank_cfg64 *cfg;
+       int ret, size;
+       u8 info;
+
+       if (!ar->hw_params.continuous_frag_desc)
+               return 0;
+
+       if (!htt->frag_desc.paddr) {
+               ath10k_warn(ar, "invalid frag desc memory\n");
+               return -EINVAL;
+       }
+
+       size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
+       skb = ath10k_htc_alloc_skb(ar, size);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, size);
+       cmd = (struct htt_cmd *)skb->data;
+       cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+
+       info = 0;
+       info |= SM(htt->tx_q_state.type,
+                  HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+       if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+                    ar->running_fw->fw_file.fw_features))
+               info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+       cfg = &cmd->frag_desc_bank_cfg64;
+       cfg->info = info;
+       cfg->num_banks = 1;
+       cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
+       cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
+       cfg->bank_id[0].bank_min_id = 0;
+       cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+                                                   1);
+
+       cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+       cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+       cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+       cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+       cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+       ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
+
+       ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+       if (ret) {
+               ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+                           ret);
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
+{
+       struct htt_rx_ring_setup_ring32 *ring =
+                       (struct htt_rx_ring_setup_ring32 *)rx_ring;
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+       ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+       ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+       ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+       ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+       ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+       ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+       ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+       ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+       ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+       ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
+{
+       struct htt_rx_ring_setup_ring64 *ring =
+                       (struct htt_rx_ring_setup_ring64 *)rx_ring;
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+       ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+       ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+       ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+       ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+       ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+       ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+       ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+       ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+       ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+       ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
+{
+       struct ath10k *ar = htt->ar;
+       struct sk_buff *skb;
+       struct htt_cmd *cmd;
+       struct htt_rx_ring_setup_ring32 *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
@@ -626,7 +812,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
 
-       len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+       len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
@@ -635,10 +821,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
        skb_put(skb, len);
 
        cmd = (struct htt_cmd *)skb->data;
-       ring = &cmd->rx_setup.rings[0];
+       ring = &cmd->rx_setup_32.rings[0];
 
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
-       cmd->rx_setup.hdr.num_rings = 1;
+       cmd->rx_setup_32.hdr.num_rings = 1;
 
        /* FIXME: do we need all of this? */
        flags = 0;
@@ -669,21 +855,76 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
 
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+       ath10k_htt_fill_rx_desc_offset_32(ring);
+       ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
 
-       ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
-       ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
-       ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
-       ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
-       ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
-       ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
-       ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
-       ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
-       ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
-       ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+       return 0;
+}
 
-#undef desc_offset
+static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
+{
+       struct ath10k *ar = htt->ar;
+       struct sk_buff *skb;
+       struct htt_cmd *cmd;
+       struct htt_rx_ring_setup_ring64 *ring;
+       const int num_rx_ring = 1;
+       u16 flags;
+       u32 fw_idx;
+       int len;
+       int ret;
+
+       /* HW expects the buffer to be an integral number of 4-byte
+        * "words"
+        */
+       BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+       BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+       len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
+           + (sizeof(*ring) * num_rx_ring);
+       skb = ath10k_htc_alloc_skb(ar, len);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, len);
+
+       cmd = (struct htt_cmd *)skb->data;
+       ring = &cmd->rx_setup_64.rings[0];
+
+       cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+       cmd->rx_setup_64.hdr.num_rings = 1;
+
+       flags = 0;
+       flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+       flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+       flags |= HTT_RX_RING_FLAGS_PPDU_START;
+       flags |= HTT_RX_RING_FLAGS_PPDU_END;
+       flags |= HTT_RX_RING_FLAGS_MPDU_START;
+       flags |= HTT_RX_RING_FLAGS_MPDU_END;
+       flags |= HTT_RX_RING_FLAGS_MSDU_START;
+       flags |= HTT_RX_RING_FLAGS_MSDU_END;
+       flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+       flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+       flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+       flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+       flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+       flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+       flags |= HTT_RX_RING_FLAGS_NULL_RX;
+       flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
 
+       fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+       ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
+       ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
+       ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+       ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+       ring->flags = __cpu_to_le16(flags);
+       ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+       ath10k_htt_fill_rx_desc_offset_64(ring);
        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
@@ -895,8 +1136,9 @@ err:
        return res;
 }
 
-int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
-                 struct sk_buff *msdu)
+static int ath10k_htt_tx_32(struct ath10k_htt *htt,
+                           enum ath10k_hw_txrx_mode txmode,
+                           struct sk_buff *msdu)
 {
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
@@ -904,7 +1146,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct ath10k_hif_sg_item sg_items[2];
-       struct ath10k_htt_txbuf *txbuf;
+       struct ath10k_htt_txbuf_32 *txbuf;
        struct htt_data_tx_desc_frag *frags;
        bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
@@ -917,6 +1159,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
        u32 frags_paddr = 0;
        u32 txbuf_paddr;
        struct htt_msdu_ext_desc *ext_desc = NULL;
+       struct htt_msdu_ext_desc *ext_desc_t = NULL;
 
        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
@@ -929,9 +1172,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);
 
-       txbuf = &htt->txbuf.vaddr[msdu_id];
+       txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
        txbuf_paddr = htt->txbuf.paddr +
-                     (sizeof(struct ath10k_htt_txbuf) * msdu_id);
+                     (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
 
        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
@@ -962,11 +1205,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
                /* pass through */
        case ATH10K_HW_TXRX_ETHERNET:
                if (ar->hw_params.continuous_frag_desc) {
-                       memset(&htt->frag_desc.vaddr[msdu_id], 0,
+                       ext_desc_t = htt->frag_desc.vaddr_desc_32;
+                       memset(&ext_desc_t[msdu_id], 0,
                               sizeof(struct htt_msdu_ext_desc));
                        frags = (struct htt_data_tx_desc_frag *)
-                               &htt->frag_desc.vaddr[msdu_id].frags;
-                       ext_desc = &htt->frag_desc.vaddr[msdu_id];
+                               &ext_desc_t[msdu_id].frags;
+                       ext_desc = &ext_desc_t[msdu_id];
                        frags[0].tword_addr.paddr_lo =
                                __cpu_to_le32(skb_cb->paddr);
                        frags[0].tword_addr.paddr_hi = 0;
@@ -1055,9 +1299,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
 
        trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
        ath10k_dbg(ar, ATH10K_DBG_HTT,
-                  "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
-                  flags0, flags1, msdu->len, msdu_id, frags_paddr,
-                  (u32)skb_cb->paddr, vdev_id, tid, freq);
+                  "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+                  flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+                  &skb_cb->paddr, vdev_id, tid, freq);
        ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
                        msdu->data, msdu->len);
        trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
@@ -1093,3 +1337,239 @@ err_free_msdu_id:
 err:
        return res;
 }
+
+static int ath10k_htt_tx_64(struct ath10k_htt *htt,
+                           enum ath10k_hw_txrx_mode txmode,
+                           struct sk_buff *msdu)
+{
+       struct ath10k *ar = htt->ar;
+       struct device *dev = ar->dev;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+       struct ath10k_hif_sg_item sg_items[2];
+       struct ath10k_htt_txbuf_64 *txbuf;
+       struct htt_data_tx_desc_frag *frags;
+       bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+       u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+       u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+       int prefetch_len;
+       int res;
+       u8 flags0 = 0;
+       u16 msdu_id, flags1 = 0;
+       u16 freq = 0;
+       dma_addr_t frags_paddr = 0;
+       dma_addr_t txbuf_paddr; /* full dma_addr_t: txbuf.paddr can exceed 32 bits on 64-bit targets */
+       struct htt_msdu_ext_desc_64 *ext_desc = NULL;
+       struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
+
+       spin_lock_bh(&htt->tx_lock);
+       res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+       spin_unlock_bh(&htt->tx_lock);
+       if (res < 0)
+               goto err;
+
+       msdu_id = res;
+
+       prefetch_len = min(htt->prefetch_len, msdu->len);
+       prefetch_len = roundup(prefetch_len, 4);
+
+       txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
+       txbuf_paddr = htt->txbuf.paddr +
+                     (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
+
+       if ((ieee80211_is_action(hdr->frame_control) ||
+            ieee80211_is_deauth(hdr->frame_control) ||
+            ieee80211_is_disassoc(hdr->frame_control)) &&
+            ieee80211_has_protected(hdr->frame_control)) {
+               skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+       } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+                  txmode == ATH10K_HW_TXRX_RAW &&
+                  ieee80211_has_protected(hdr->frame_control)) {
+               skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+       }
+
+       skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+                                      DMA_TO_DEVICE);
+       res = dma_mapping_error(dev, skb_cb->paddr);
+       if (res) {
+               res = -EIO;
+               goto err_free_msdu_id;
+       }
+
+       if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+               freq = ar->scan.roc_freq;
+
+       switch (txmode) {
+       case ATH10K_HW_TXRX_RAW:
+       case ATH10K_HW_TXRX_NATIVE_WIFI:
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+               /* fall through */
+       case ATH10K_HW_TXRX_ETHERNET:
+               if (ar->hw_params.continuous_frag_desc) {
+                       ext_desc_t = htt->frag_desc.vaddr_desc_64;
+                       memset(&ext_desc_t[msdu_id], 0,
+                              sizeof(struct htt_msdu_ext_desc_64));
+                       frags = (struct htt_data_tx_desc_frag *)
+                               &ext_desc_t[msdu_id].frags;
+                       ext_desc = &ext_desc_t[msdu_id];
+                       frags[0].tword_addr.paddr_lo =
+                               __cpu_to_le32(skb_cb->paddr);
+                       frags[0].tword_addr.paddr_hi =
+                               __cpu_to_le16(upper_32_bits(skb_cb->paddr));
+                       frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+                       frags_paddr = htt->frag_desc.paddr +
+                          (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
+               } else {
+                       frags = txbuf->frags;
+                       frags[0].tword_addr.paddr_lo =
+                                               __cpu_to_le32(skb_cb->paddr);
+                       frags[0].tword_addr.paddr_hi =
+                               __cpu_to_le16(upper_32_bits(skb_cb->paddr));
+                       frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+                       frags[1].tword_addr.paddr_lo = 0;
+                       frags[1].tword_addr.paddr_hi = 0;
+                       frags[1].tword_addr.len_16 = 0;
+               }
+               flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+               break;
+       case ATH10K_HW_TXRX_MGMT:
+               flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+                            HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+               frags_paddr = skb_cb->paddr;
+               break;
+       }
+
+       /* Normally all commands go through HTC which manages tx credits for
+        * each endpoint and notifies when tx is completed.
+        *
+        * HTT endpoint is creditless so there's no need to care about HTC
+        * flags. In that case it is trivial to fill the HTC header here.
+        *
+        * MSDU transmission is considered completed upon HTT event. This
+        * implies no relevant resources can be freed until after the event is
+        * received. That's why HTC tx completion handler itself is ignored by
+        * setting NULL to transfer_context for all sg items.
+        *
+        * There is simply no point in pushing HTT TX_FRM through HTC tx path
+        * as it's a waste of resources. By bypassing HTC it is possible to
+        * avoid extra memory allocations, compress data structures and thus
+        * improve performance.
+        */
+
+       txbuf->htc_hdr.eid = htt->eid;
+       txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+                                          sizeof(txbuf->cmd_tx) +
+                                          prefetch_len);
+       txbuf->htc_hdr.flags = 0;
+
+       if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+       flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+       flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+       if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+           !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+               flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+               flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+               if (ar->hw_params.continuous_frag_desc)
+                       ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+       }
+
+       /* Prevent firmware from sending up tx inspection requests. There's
+        * nothing ath10k can do with frames requested for inspection so force
+        * it to simply rely on a regular tx completion with discard status.
+        */
+       flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+       txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+       txbuf->cmd_tx.flags0 = flags0;
+       txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+       txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+       txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+
+       /* fill fragment descriptor */
+       txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
+       if (ath10k_mac_tx_frm_has_freq(ar)) {
+               txbuf->cmd_tx.offchan_tx.peerid =
+                               __cpu_to_le16(HTT_INVALID_PEERID);
+               txbuf->cmd_tx.offchan_tx.freq =
+                               __cpu_to_le16(freq);
+       } else {
+               txbuf->cmd_tx.peerid =
+                               __cpu_to_le32(HTT_INVALID_PEERID);
+       }
+
+       trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+       ath10k_dbg(ar, ATH10K_DBG_HTT,
+                  "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+                  flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+                  &skb_cb->paddr, vdev_id, tid, freq);
+       ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+                       msdu->data, msdu->len);
+       trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+       trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
+       sg_items[0].transfer_id = 0;
+       sg_items[0].transfer_context = NULL;
+       sg_items[0].vaddr = &txbuf->htc_hdr;
+       sg_items[0].paddr = txbuf_paddr +
+                           sizeof(txbuf->frags);
+       sg_items[0].len = sizeof(txbuf->htc_hdr) +
+                         sizeof(txbuf->cmd_hdr) +
+                         sizeof(txbuf->cmd_tx);
+
+       sg_items[1].transfer_id = 0;
+       sg_items[1].transfer_context = NULL;
+       sg_items[1].vaddr = msdu->data;
+       sg_items[1].paddr = skb_cb->paddr;
+       sg_items[1].len = prefetch_len;
+
+       res = ath10k_hif_tx_sg(htt->ar,
+                              htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+                              sg_items, ARRAY_SIZE(sg_items));
+       if (res)
+               goto err_unmap_msdu;
+
+       return 0;
+
+err_unmap_msdu:
+       dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_msdu_id:
+       ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+err:
+       return res;
+}
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
+       .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
+       .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
+       .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
+       .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
+       .htt_tx = ath10k_htt_tx_32,
+       .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
+       .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
+};
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
+       .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
+       .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
+       .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
+       .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
+       .htt_tx = ath10k_htt_tx_64,
+       .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
+       .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
+};
+
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
+{
+       struct ath10k *ar = htt->ar;
+
+       if (ar->hw_params.target_64bit)
+               htt->tx_ops = &htt_tx_ops_64;
+       else
+               htt->tx_ops = &htt_tx_ops_32;
+}
index c31eea6..497ac33 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 90ad39b..6203bc6 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -561,6 +561,12 @@ struct ath10k_hw_params {
        u32 num_peers;
        u32 ast_skid_limit;
        u32 num_wds_entries;
+
+       /* Targets supporting physical addressing capability above 32-bits */
+       bool target_64bit;
+
+       /* Target rx ring fill level */
+       u32 rx_ring_fill_level;
 };
 
 struct htt_rx_desc;
@@ -882,6 +888,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
 #define PCIE_INTR_CLR_ADDRESS                  ar->regs->pcie_intr_clr_address
 #define SCRATCH_3_ADDRESS                      ar->regs->scratch_3_address
 #define CPU_INTR_ADDRESS                       0x0010
+#define FW_RAM_CONFIG_ADDRESS                  0x0018
 
 #define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
 
index 75726f1..ebb3f1b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -3597,7 +3597,7 @@ static int ath10k_mac_tx_submit(struct ath10k *ar,
 
        switch (txpath) {
        case ATH10K_MAC_TX_HTT:
-               ret = ath10k_htt_tx(htt, txmode, skb);
+               ret = htt->tx_ops->htt_tx(htt, txmode, skb);
                break;
        case ATH10K_MAC_TX_HTT_MGMT:
                ret = ath10k_htt_mgmt_tx(htt, skb);
@@ -8294,7 +8294,8 @@ int ath10k_mac_register(struct ath10k *ar)
        if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
            test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
                ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
-               ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
+               if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
+                       ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
        }
 
        ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
index 553747b..81f8d6c 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index ffea348..8abaccc 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -23,6 +23,7 @@
 
 #include "core.h"
 #include "debug.h"
+#include "coredump.h"
 
 #include "targaddrs.h"
 #include "bmi.h"
@@ -51,6 +52,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
 #define ATH10K_PCI_TARGET_WAIT 3000
 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
 
+/* Maximum number of bytes that can be handled atomically by
+ * diag read and write.
+ */
+#define ATH10K_DIAG_TRANSFER_LIMIT     0x5000
+
 static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@@ -785,7 +791,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
        ATH10K_SKB_RXCB(skb)->paddr = paddr;
 
        spin_lock_bh(&ce->ce_lock);
-       ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+       ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
        spin_unlock_bh(&ce->ce_lock);
        if (ret) {
                dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
@@ -923,7 +929,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);
 
-               ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
+               ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data);
                if (ret != 0)
                        goto done;
 
@@ -1089,7 +1095,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
 
                /* Set up to receive directly into Target(!) address */
-               ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
+               ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
                if (ret != 0)
                        goto done;
 
@@ -1461,6 +1467,218 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
                crash_data->registers[i] = reg_dump_values[i];
 }
 
+static int ath10k_pci_dump_memory_section(struct ath10k *ar,
+                                         const struct ath10k_mem_region *mem_region,
+                                         u8 *buf, size_t buf_len)
+{
+       const struct ath10k_mem_section *cur_section, *next_section;
+       unsigned int count, section_size, skip_size;
+       int ret, i, j;
+
+       if (!mem_region || !buf)
+               return 0;
+
+       if (mem_region->section_table.size < 0)
+               return 0;
+
+       cur_section = &mem_region->section_table.sections[0];
+
+       if (mem_region->start > cur_section->start) {
+               ath10k_warn(ar, "incorrect memdump region 0x%x with section start addrress 0x%x.\n",
+                           mem_region->start, cur_section->start);
+               return 0;
+       }
+
+       skip_size = cur_section->start - mem_region->start;
+
+       /* fill the gap between the first register section and register
+        * start address
+        */
+       for (i = 0; i < skip_size; i++) {
+               *buf = ATH10K_MAGIC_NOT_COPIED;
+               buf++;
+       }
+
+       count = 0;
+
+       for (i = 0; cur_section != NULL; i++) {
+               section_size = cur_section->end - cur_section->start;
+
+               if (section_size <= 0) {
+                       ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+                                   cur_section->start,
+                                   cur_section->end);
+                       break;
+               }
+
+               if ((i + 1) == mem_region->section_table.size) {
+                       /* last section */
+                       next_section = NULL;
+                       skip_size = 0;
+               } else {
+                       next_section = cur_section + 1;
+
+                       if (cur_section->end > next_section->start) {
+                               ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+                                           next_section->start,
+                                           cur_section->end);
+                               break;
+                       }
+
+                       skip_size = next_section->start - cur_section->end;
+               }
+
+               if (buf_len < (skip_size + section_size)) {
+                       ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+                       break;
+               }
+
+               buf_len -= skip_size + section_size;
+
+               /* read section to dest memory */
+               ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
+                                              buf, section_size);
+               if (ret) {
+                       ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+                                   cur_section->start, ret);
+                       break;
+               }
+
+               buf += section_size;
+               count += section_size;
+
+               /* fill in the gap between this section and the next */
+               for (j = 0; j < skip_size; j++) {
+                       *buf = ATH10K_MAGIC_NOT_COPIED;
+                       buf++;
+               }
+
+               count += skip_size;
+
+               if (!next_section)
+                       /* this was the last section */
+                       break;
+
+               cur_section = next_section;
+       }
+
+       return count;
+}
+
+static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
+{
+       u32 val;
+
+       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+                          FW_RAM_CONFIG_ADDRESS, config);
+
+       val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                               FW_RAM_CONFIG_ADDRESS);
+       if (val != config) {
+               ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
+                           val, config);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static void ath10k_pci_dump_memory(struct ath10k *ar,
+                                  struct ath10k_fw_crash_data *crash_data)
+{
+       const struct ath10k_hw_mem_layout *mem_layout;
+       const struct ath10k_mem_region *current_region;
+       struct ath10k_dump_ram_data_hdr *hdr;
+       u32 count, shift;
+       size_t buf_len;
+       int ret, i;
+       u8 *buf;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       if (!crash_data)
+               return;
+
+       mem_layout = ath10k_coredump_get_mem_layout(ar);
+       if (!mem_layout)
+               return;
+
+       current_region = &mem_layout->region_table.regions[0];
+
+       buf = crash_data->ramdump_buf;
+       buf_len = crash_data->ramdump_buf_len;
+
+       memset(buf, 0, buf_len);
+
+       for (i = 0; i < mem_layout->region_table.size; i++) {
+               count = 0;
+
+               if (current_region->len > buf_len) {
+                       ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
+                                   current_region->name,
+                                   current_region->len,
+                                   buf_len);
+                       break;
+               }
+
+               /* To get IRAM dump, the host driver needs to switch target
+                * ram config from DRAM to IRAM.
+                */
+               if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
+                   current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
+                       shift = current_region->start >> 20;
+
+                       ret = ath10k_pci_set_ram_config(ar, shift);
+                       if (ret) {
+                               ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
+                                           current_region->name, ret);
+                               break;
+                       }
+               }
+
+               /* Reserve space for the header. */
+               hdr = (void *)buf;
+               buf += sizeof(*hdr);
+               buf_len -= sizeof(*hdr);
+
+               if (current_region->section_table.size > 0) {
+                       /* Copy each section individually. */
+                       count = ath10k_pci_dump_memory_section(ar,
+                                                              current_region,
+                                                              buf,
+                                                              current_region->len);
+               } else {
+                       /* No individiual memory sections defined so we can
+                        * copy the entire memory region.
+                        */
+                       ret = ath10k_pci_diag_read_mem(ar,
+                                                      current_region->start,
+                                                      buf,
+                                                      current_region->len);
+                       if (ret) {
+                               ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+                                           current_region->name, ret);
+                               break;
+                       }
+
+                       count = current_region->len;
+               }
+
+               hdr->region_type = cpu_to_le32(current_region->type);
+               hdr->start = cpu_to_le32(current_region->start);
+               hdr->length = cpu_to_le32(count);
+
+               if (count == 0)
+                       /* Note: the header remains, just with zero length. */
+                       break;
+
+               buf += count;
+               buf_len -= count;
+
+               current_region++;
+       }
+}
+
 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
 {
        struct ath10k_fw_crash_data *crash_data;
@@ -1470,7 +1688,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
 
        ar->stats.fw_crash_counter++;
 
-       crash_data = ath10k_debug_get_new_fw_crash_data(ar);
+       crash_data = ath10k_coredump_new(ar);
 
        if (crash_data)
                scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
@@ -1481,6 +1699,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
        ath10k_print_driver_info(ar);
        ath10k_pci_dump_registers(ar, crash_data);
        ath10k_ce_dump_registers(ar, crash_data);
+       ath10k_pci_dump_memory(ar, crash_data);
 
        spin_unlock_bh(&ar->data_lock);
 
@@ -1858,7 +2077,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 
        ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
        if (ret) {
-               u32 unused_buffer;
+               dma_addr_t unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;
 
@@ -1871,7 +2090,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 
 err_resp:
        if (resp) {
-               u32 unused_buffer;
+               dma_addr_t unused_buffer;
 
                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
index 08704fb..e52fd83 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 28da143..545deb6 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -210,6 +210,10 @@ struct rx_frag_info {
        u8 ring1_more_count;
        u8 ring2_more_count;
        u8 ring3_more_count;
+       u8 ring4_more_count;
+       u8 ring5_more_count;
+       u8 ring6_more_count;
+       u8 ring7_more_count;
 } __packed;
 
 /*
@@ -471,10 +475,16 @@ struct rx_msdu_start_qca99x0 {
        __le32 info2; /* %RX_MSDU_START_INFO2_ */
 } __packed;
 
+struct rx_msdu_start_wcn3990 {
+       __le32 info2; /* %RX_MSDU_START_INFO2_ */
+       __le32 info3; /* %RX_MSDU_START_INFO3_ */
+} __packed;
+
 struct rx_msdu_start {
        struct rx_msdu_start_common common;
        union {
                struct rx_msdu_start_qca99x0 qca99x0;
+               struct rx_msdu_start_wcn3990 wcn3990;
        } __packed;
 } __packed;
 
@@ -595,10 +605,23 @@ struct rx_msdu_end_qca99x0 {
        __le32 info2;
 } __packed;
 
+struct rx_msdu_end_wcn3990 {
+       __le32 ipv6_crc;
+       __le32 tcp_seq_no;
+       __le32 tcp_ack_no;
+       __le32 info1;
+       __le32 info2;
+       __le32 rule_indication_0;
+       __le32 rule_indication_1;
+       __le32 rule_indication_2;
+       __le32 rule_indication_3;
+} __packed;
+
 struct rx_msdu_end {
        struct rx_msdu_end_common common;
        union {
                struct rx_msdu_end_qca99x0 qca99x0;
+               struct rx_msdu_end_wcn3990 wcn3990;
        } __packed;
 } __packed;
 
@@ -963,6 +986,12 @@ struct rx_pkt_end {
        __le32 phy_timestamp_2;
 } __packed;
 
+struct rx_pkt_end_wcn3990 {
+       __le32 info0; /* %RX_PKT_END_INFO0_ */
+       __le64 phy_timestamp_1;
+       __le64 phy_timestamp_2;
+} __packed;
+
 #define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK          0x00003fff
 #define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB           0
 #define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK             0x1fff8000
@@ -998,6 +1027,12 @@ struct rx_location_info {
        __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
 } __packed;
 
+struct rx_location_info_wcn3990 {
+       __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+       __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
+       __le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */
+} __packed;
+
 enum rx_phy_ppdu_end_info0 {
        RX_PHY_PPDU_END_INFO0_ERR_RADAR           = BIT(2),
        RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT        = BIT(3),
@@ -1086,6 +1121,20 @@ struct rx_ppdu_end_qca9984 {
        __le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
 
+struct rx_ppdu_end_wcn3990 {
+       struct rx_pkt_end_wcn3990 rx_pkt_end;
+       struct rx_location_info_wcn3990 rx_location_info;
+       struct rx_phy_ppdu_end rx_phy_ppdu_end;
+       __le32 rx_timing_offset;
+       __le32 reserved_info_0;
+       __le32 reserved_info_1;
+       __le32 rx_antenna_info;
+       __le32 rx_coex_info;
+       __le32 rx_mpdu_cnt_info;
+       __le64 phy_timestamp_tx;
+       __le32 rx_bb_length;
+} __packed;
+
 struct rx_ppdu_end {
        struct rx_ppdu_end_common common;
        union {
@@ -1093,6 +1142,7 @@ struct rx_ppdu_end {
                struct rx_ppdu_end_qca6174 qca6174;
                struct rx_ppdu_end_qca99x0 qca99x0;
                struct rx_ppdu_end_qca9984 qca9984;
+               struct rx_ppdu_end_wcn3990 wcn3990;
        } __packed;
 } __packed;
 
index 2048b1e..af6995d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index b2a2e8a..13276f4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index adf4592..e7f57ef 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index f5dc047..fa602f1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 8bded5d..c2b5bad 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 9d3eb25..568810b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 191a8f3..6514d1a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index ef717b6..aa8978a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 3abb97f..65e2419 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index e0d00ce..e40edce 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index d4986f6..5b3b021 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index e7ea1ae..2bf401e 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 41eef94..14093cf 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 8d53063..ae77a00 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -2494,7 +2494,6 @@ ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
        void *ptr;
        int len;
        u32 buf_len = msdu->len;
-       u16 fc;
        struct ath10k_vif *arvif;
        dma_addr_t mgmt_frame_dma;
        u32 vdev_id;
@@ -2503,7 +2502,6 @@ ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
                return ERR_PTR(-EINVAL);
 
        hdr = (struct ieee80211_hdr *)msdu->data;
-       fc = le16_to_cpu(hdr->frame_control);
        arvif = (void *)cb->vif->drv_priv;
        vdev_id = arvif->vdev_id;
 
index 4faaa64..da89128 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index b6cbc02..58dc218 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index f6d60dc..c7b30ed 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -196,6 +196,7 @@ enum wmi_service {
        WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
        WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
        WMI_SERVICE_MGMT_TX_WMI,
+       WMI_SERVICE_TDLS_WIDER_BANDWIDTH,
 
        /* keep last */
        WMI_SERVICE_MAX,
@@ -337,6 +338,7 @@ enum wmi_10_4_service {
        WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
        WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
        WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+       WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
 };
 
 static inline char *wmi_service_name(int service_id)
@@ -445,6 +447,7 @@ static inline char *wmi_service_name(int service_id)
        SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT);
        SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE);
        SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY);
+       SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH);
        default:
                return NULL;
        }
@@ -741,6 +744,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
               WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len);
        SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
               WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len);
+       SVCMAP(WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
+              WMI_SERVICE_TDLS_WIDER_BANDWIDTH, len);
 }
 
 #undef SVCMAP
@@ -2924,7 +2929,7 @@ struct wmi_ext_resource_config_10_4_cmd {
        __le32 max_tdls_concurrent_buffer_sta;
 };
 
-/* strucutre describing host memory chunk. */
+/* structure describing host memory chunk. */
 struct host_memory_chunk {
        /* id of the request that is passed up in service ready */
        __le32 req_id;
index 0d46d6d..c4cbccb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 9745b9d..6e81010 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
index 2914618..2a4871c 100644 (file)
@@ -626,7 +626,7 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
 
        msg_body.scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE;
        msg_body.min_ch_time = 30;
-       msg_body.min_ch_time = 100;
+       msg_body.max_ch_time = 100;
        msg_body.scan_hidden = 1;
        memcpy(msg_body.mac, vif->addr, ETH_ALEN);
        msg_body.p2p_search = vif->p2p;
index 771a534..768f63f 100644 (file)
@@ -956,9 +956,8 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
                                    struct cfg80211_chan_def *chandef)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-       struct wireless_dev *wdev = wil_to_wdev(wil);
 
-       wdev->preset_chandef = *chandef;
+       wil->monitor_chandef = *chandef;
 
        return 0;
 }
@@ -1751,6 +1750,69 @@ static int wil_cfg80211_resume(struct wiphy *wiphy)
        return 0;
 }
 
+static int
+wil_cfg80211_sched_scan_start(struct wiphy *wiphy,
+                             struct net_device *dev,
+                             struct cfg80211_sched_scan_request *request)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int i, rc;
+
+       wil_dbg_misc(wil,
+                    "sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n",
+                    request->n_ssids, request->ie_len, request->flags);
+       for (i = 0; i < request->n_ssids; i++) {
+               wil_dbg_misc(wil, "SSID[%d]:", i);
+               wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+                                 request->ssids[i].ssid,
+                                 request->ssids[i].ssid_len, true);
+       }
+       wil_dbg_misc(wil, "channels:");
+       for (i = 0; i < request->n_channels; i++)
+               wil_dbg_misc(wil, " %d%s", request->channels[i]->hw_value,
+                            i == request->n_channels - 1 ? "\n" : "");
+       wil_dbg_misc(wil, "n_match_sets %d, min_rssi_thold %d, delay %d\n",
+                    request->n_match_sets, request->min_rssi_thold,
+                    request->delay);
+       for (i = 0; i < request->n_match_sets; i++) {
+               struct cfg80211_match_set *ms = &request->match_sets[i];
+
+               wil_dbg_misc(wil, "MATCHSET[%d]: rssi_thold %d\n",
+                            i, ms->rssi_thold);
+               wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+                                 ms->ssid.ssid,
+                                 ms->ssid.ssid_len, true);
+       }
+       wil_dbg_misc(wil, "n_scan_plans %d\n", request->n_scan_plans);
+       for (i = 0; i < request->n_scan_plans; i++) {
+               struct cfg80211_sched_scan_plan *sp = &request->scan_plans[i];
+
+               wil_dbg_misc(wil, "SCAN PLAN[%d]: interval %d iterations %d\n",
+                            i, sp->interval, sp->iterations);
+       }
+
+       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+       if (rc)
+               return rc;
+       return wmi_start_sched_scan(wil, request);
+}
+
+static int
+wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev,
+                            u64 reqid)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int rc;
+
+       rc = wmi_stop_sched_scan(wil);
+       /* device would return error if it thinks PNO is already stopped.
+        * ignore the return code so user space and driver gets back in-sync
+        */
+       wil_dbg_misc(wil, "sched scan stopped (%d)\n", rc);
+
+       return 0;
+}
+
 static const struct cfg80211_ops wil_cfg80211_ops = {
        .add_virtual_intf = wil_cfg80211_add_iface,
        .del_virtual_intf = wil_cfg80211_del_iface,
@@ -1784,6 +1846,8 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
        .set_power_mgmt = wil_cfg80211_set_power_mgmt,
        .suspend = wil_cfg80211_suspend,
        .resume = wil_cfg80211_resume,
+       .sched_scan_start = wil_cfg80211_sched_scan_start,
+       .sched_scan_stop = wil_cfg80211_sched_scan_stop,
 };
 
 static void wil_wiphy_init(struct wiphy *wiphy)
index 4475937..4a48882 100644 (file)
@@ -869,7 +869,6 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
 
        params.buf = frame;
        params.len = len;
-       params.chan = wdev->preset_chandef.chan;
 
        rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
 
index 5cf3417..dcf87a7 100644 (file)
@@ -565,7 +565,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
        if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
                return IRQ_NONE;
 
-       /* FIXME: IRQ mask debug */
+       /* IRQ mask debug */
        if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
                return IRQ_NONE;
 
index 1b53cd3..aa6f9c4 100644 (file)
@@ -771,11 +771,11 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
 void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
 {
        struct wiphy *wiphy = wil_to_wiphy(wil);
+       int features;
 
        wil->keep_radio_on_during_sleep =
-               wil->platform_ops.keep_radio_on_during_sleep &&
-               wil->platform_ops.keep_radio_on_during_sleep(
-                       wil->platform_handle) &&
+               test_bit(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND,
+                        wil->platform_capa) &&
                test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
 
        wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
@@ -785,6 +785,24 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
                wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
        else
                wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+
+       if (test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) {
+               wiphy->max_sched_scan_reqs = 1;
+               wiphy->max_sched_scan_ssids = WMI_MAX_PNO_SSID_NUM;
+               wiphy->max_match_sets = WMI_MAX_PNO_SSID_NUM;
+               wiphy->max_sched_scan_ie_len = WMI_MAX_IE_LEN;
+               wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
+       }
+
+       if (wil->platform_ops.set_features) {
+               features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
+                                    wil->fw_capabilities) &&
+                           test_bit(WIL_PLATFORM_CAPA_EXT_CLK,
+                                    wil->platform_capa)) ?
+                       BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
+
+               wil->platform_ops.set_features(wil->platform_handle, features);
+       }
 }
 
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -980,6 +998,7 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
 int wil_reset(struct wil6210_priv *wil, bool load_fw)
 {
        int rc;
+       unsigned long status_flags = BIT(wil_status_resetting);
 
        wil_dbg_misc(wil, "reset\n");
 
@@ -1000,6 +1019,16 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        if (wil->hw_version == HW_VER_UNKNOWN)
                return -ENODEV;
 
+       if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) {
+               wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
+               wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
+       }
+
+       if (test_bit(WIL_PLATFORM_CAPA_EXT_CLK, wil->platform_capa)) {
+               wil_dbg_misc(wil, "Notify FW on ext clock configuration\n");
+               wil_s(wil, RGF_USER_USAGE_8, BIT_USER_EXT_CLK);
+       }
+
        if (wil->platform_ops.notify) {
                rc = wil->platform_ops.notify(wil->platform_handle,
                                              WIL_PLATFORM_EVT_PRE_RESET);
@@ -1009,6 +1038,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        }
 
        set_bit(wil_status_resetting, wil->status);
+       if (test_bit(wil_status_collecting_dumps, wil->status)) {
+               /* Device collects crash dump, cancel the reset.
+                * following crash dump collection, reset would take place.
+                */
+               wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
+               rc = -EBUSY;
+               goto out;
+       }
 
        cancel_work_sync(&wil->disconnect_worker);
        wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
@@ -1023,7 +1060,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
        /* prevent NAPI from being scheduled and prevent wmi commands */
        mutex_lock(&wil->wmi_mutex);
-       bitmap_zero(wil->status, wil_status_last);
+       if (test_bit(wil_status_suspending, wil->status))
+               status_flags |= BIT(wil_status_suspending);
+       bitmap_and(wil->status, wil->status, &status_flags,
+                  wil_status_last);
+       wil_dbg_misc(wil, "wil->status (0x%lx)\n", *wil->status);
        mutex_unlock(&wil->wmi_mutex);
 
        wil_mask_irq(wil);
@@ -1041,14 +1082,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        wil_rx_fini(wil);
        if (rc) {
                wil_bl_crash_info(wil, true);
-               return rc;
+               goto out;
        }
 
        rc = wil_get_bl_info(wil);
        if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
                rc = 0;
        if (rc)
-               return rc;
+               goto out;
 
        wil_set_oob_mode(wil, oob_mode);
        if (load_fw) {
@@ -1060,10 +1101,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                /* Loading f/w from the file */
                rc = wil_request_firmware(wil, wil->wil_fw_name, true);
                if (rc)
-                       return rc;
+                       goto out;
                rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true);
                if (rc)
-                       return rc;
+                       goto out;
 
                wil_pre_fw_config(wil);
                wil_release_cpu(wil);
@@ -1075,6 +1116,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        reinit_completion(&wil->wmi_call);
        reinit_completion(&wil->halp.comp);
 
+       clear_bit(wil_status_resetting, wil->status);
+
        if (load_fw) {
                wil_configure_interrupt_moderation(wil);
                wil_unmask_irq(wil);
@@ -1108,6 +1151,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        }
 
        return rc;
+
+out:
+       clear_bit(wil_status_resetting, wil->status);
+       return rc;
 }
 
 void wil_fw_error_recovery(struct wil6210_priv *wil)
@@ -1213,9 +1260,7 @@ int __wil_down(struct wil6210_priv *wil)
        wil_abort_scan(wil, false);
        mutex_unlock(&wil->p2p_wdev_mutex);
 
-       wil_reset(wil, false);
-
-       return 0;
+       return wil_reset(wil, false);
 }
 
 int wil_down(struct wil6210_priv *wil)
index b641ac1..7ba4e0a 100644 (file)
@@ -150,7 +150,7 @@ void *wil_if_alloc(struct device *dev)
        wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
        /* default monitor channel */
        ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
-       cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+       cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT);
 
        ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
        if (!ndev) {
index 42a5480..0c401bf 100644 (file)
@@ -31,10 +31,8 @@ static bool ftm_mode;
 module_param(ftm_mode, bool, 0444);
 MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
 
-#ifdef CONFIG_PM
 static int wil6210_pm_notify(struct notifier_block *notify_block,
                             unsigned long mode, void *unused);
-#endif /* CONFIG_PM */
 
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
@@ -43,9 +41,11 @@ void wil_set_capabilities(struct wil6210_priv *wil)
        u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
        u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
                            RGF_USER_REVISION_ID_MASK);
+       int platform_capa;
 
        bitmap_zero(wil->hw_capabilities, hw_capability_last);
        bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+       bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX);
        wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
                           WIL_FW_NAME_DEFAULT;
        wil->chip_revision = chip_revision;
@@ -81,6 +81,14 @@ void wil_set_capabilities(struct wil6210_priv *wil)
 
        wil_info(wil, "Board hardware is %s\n", wil->hw_name);
 
+       /* Get platform capabilities */
+       if (wil->platform_ops.get_capa) {
+               platform_capa =
+                       wil->platform_ops.get_capa(wil->platform_handle);
+               memcpy(wil->platform_capa, &platform_capa,
+                      min(sizeof(wil->platform_capa), sizeof(platform_capa)));
+       }
+
        /* extract FW capabilities from file without loading the FW */
        wil_request_firmware(wil, wil->wil_fw_name, false);
        wil_refresh_fw_capabilities(wil);
@@ -206,6 +214,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                .fw_recovery = wil_platform_rop_fw_recovery,
        };
        u32 bar_size = pci_resource_len(pdev, 0);
+       int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
+       int i;
 
        /* check HW */
        dev_info(&pdev->dev, WIL_NAME
@@ -241,21 +251,23 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        /* rollback to err_plat */
 
-       /* device supports 48bit addresses */
-       rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
-       if (rc) {
-               dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
-               rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       /* device supports >32bit addresses */
+       for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
+               rc = dma_set_mask_and_coherent(dev,
+                                              DMA_BIT_MASK(dma_addr_size[i]));
                if (rc) {
-                       dev_err(dev,
-                               "dma_set_mask_and_coherent(32) failed: %d\n",
-                               rc);
-                       goto err_plat;
+                       dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+                               dma_addr_size[i], rc);
+                       continue;
                }
-       } else {
-               wil->use_extended_dma_addr = 1;
+               dev_info(dev, "using dma mask %d", dma_addr_size[i]);
+               wil->dma_addr_size = dma_addr_size[i];
+               break;
        }
 
+       if (wil->dma_addr_size == 0)
+               goto err_plat;
+
        rc = pci_enable_device(pdev);
        if (rc && pdev->msi_enabled == 0) {
                wil_err(wil,
@@ -307,15 +319,15 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto bus_disable;
        }
 
-#ifdef CONFIG_PM
-       wil->pm_notify.notifier_call = wil6210_pm_notify;
+       if (IS_ENABLED(CONFIG_PM))
+               wil->pm_notify.notifier_call = wil6210_pm_notify;
+
        rc = register_pm_notifier(&wil->pm_notify);
        if (rc)
                /* Do not fail the driver initialization, as suspend can
                 * be prevented in a later phase if needed
                 */
                wil_err(wil, "register_pm_notifier failed: %d\n", rc);
-#endif /* CONFIG_PM */
 
        wil6210_debugfs_init(wil);
 
@@ -346,9 +358,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
 
        wil_dbg_misc(wil, "pcie_remove\n");
 
-#ifdef CONFIG_PM
        unregister_pm_notifier(&wil->pm_notify);
-#endif /* CONFIG_PM */
 
        wil_pm_runtime_forbid(wil);
 
@@ -372,8 +382,6 @@ static const struct pci_device_id wil6210_pcie_ids[] = {
 };
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
-#ifdef CONFIG_PM
-
 static int wil6210_suspend(struct device *dev, bool is_runtime)
 {
        int rc = 0;
@@ -481,17 +489,17 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
        return rc;
 }
 
-static int wil6210_pm_suspend(struct device *dev)
+static int __maybe_unused wil6210_pm_suspend(struct device *dev)
 {
        return wil6210_suspend(dev, false);
 }
 
-static int wil6210_pm_resume(struct device *dev)
+static int __maybe_unused wil6210_pm_resume(struct device *dev)
 {
        return wil6210_resume(dev, false);
 }
 
-static int wil6210_pm_runtime_idle(struct device *dev)
+static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct wil6210_priv *wil = pci_get_drvdata(pdev);
@@ -501,12 +509,12 @@ static int wil6210_pm_runtime_idle(struct device *dev)
        return wil_can_suspend(wil, true);
 }
 
-static int wil6210_pm_runtime_resume(struct device *dev)
+static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev)
 {
        return wil6210_resume(dev, true);
 }
 
-static int wil6210_pm_runtime_suspend(struct device *dev)
+static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct wil6210_priv *wil = pci_get_drvdata(pdev);
@@ -518,15 +526,12 @@ static int wil6210_pm_runtime_suspend(struct device *dev)
 
        return wil6210_suspend(dev, true);
 }
-#endif /* CONFIG_PM */
 
 static const struct dev_pm_ops wil6210_pm_ops = {
-#ifdef CONFIG_PM
        SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
        SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
                           wil6210_pm_runtime_resume,
                           wil6210_pm_runtime_idle)
-#endif /* CONFIG_PM */
 };
 
 static struct pci_driver wil6210_driver = {
index 056b180..0a96518 100644 (file)
@@ -145,6 +145,13 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 
        /* Prevent handling of new tx and wmi commands */
        set_bit(wil_status_suspending, wil->status);
+       if (test_bit(wil_status_collecting_dumps, wil->status)) {
+               /* Device collects crash dump, cancel the suspend */
+               wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+               clear_bit(wil_status_suspending, wil->status);
+               wil->suspend_stats.rejected_by_host++;
+               return -EBUSY;
+       }
        wil_update_net_queues_bh(wil, NULL, true);
 
        if (!wil_is_tx_idle(wil)) {
@@ -255,6 +262,15 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
 
        wil_dbg_pm(wil, "suspend radio off\n");
 
+       set_bit(wil_status_suspending, wil->status);
+       if (test_bit(wil_status_collecting_dumps, wil->status)) {
+               /* Device collects crash dump, cancel the suspend */
+               wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
+               clear_bit(wil_status_suspending, wil->status);
+               wil->suspend_stats.rejected_by_host++;
+               return -EBUSY;
+       }
+
        /* if netif up, hardware is alive, shut it down */
        if (ndev->flags & IFF_UP) {
                rc = wil_down(wil);
@@ -281,6 +297,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
        set_bit(wil_status_suspended, wil->status);
 
 out:
+       clear_bit(wil_status_suspending, wil->status);
        wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
 
        return rc;
index 2e301b6..4ea27b0 100644 (file)
@@ -111,14 +111,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
         *
         * HW has limitation that all vrings addresses must share the same
         * upper 16 msb bits part of 48 bits address. To workaround that,
-        * if we are using 48 bit addresses switch to 32 bit allocation
-        * before allocating vring memory.
+        * if we are using more than 32 bit addresses switch to 32 bit
+        * allocation before allocating vring memory.
         *
         * There's no check for the return value of dma_set_mask_and_coherent,
         * since we assume if we were able to set the mask during
         * initialization in this system it will not fail if we set it again
         */
-       if (wil->use_extended_dma_addr)
+       if (wil->dma_addr_size > 32)
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
        pmc->pring_va = dma_alloc_coherent(dev,
@@ -126,8 +126,9 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
                        &pmc->pring_pa,
                        GFP_KERNEL);
 
-       if (wil->use_extended_dma_addr)
-               dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+       if (wil->dma_addr_size > 32)
+               dma_set_mask_and_coherent(dev,
+                                         DMA_BIT_MASK(wil->dma_addr_size));
 
        wil_dbg_misc(wil,
                     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
index 389c718..16b8a4e 100644 (file)
@@ -178,14 +178,14 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
         *
         * HW has limitation that all vrings addresses must share the same
         * upper 16 msb bits part of 48 bits address. To workaround that,
-        * if we are using 48 bit addresses switch to 32 bit allocation
-        * before allocating vring memory.
+        * if we are using more than 32 bit addresses switch to 32 bit
+        * allocation before allocating vring memory.
         *
         * There's no check for the return value of dma_set_mask_and_coherent,
         * since we assume if we were able to set the mask during
         * initialization in this system it will not fail if we set it again
         */
-       if (wil->use_extended_dma_addr)
+       if (wil->dma_addr_size > 32)
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
        vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -195,8 +195,9 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
                return -ENOMEM;
        }
 
-       if (wil->use_extended_dma_addr)
-               dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+       if (wil->dma_addr_size > 32)
+               dma_set_mask_and_coherent(dev,
+                                         DMA_BIT_MASK(wil->dma_addr_size));
 
        /* initially, all descriptors are SW owned
         * For Tx and Rx, ownership bit is at the same location, thus
@@ -347,7 +348,6 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
 static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
                                       struct sk_buff *skb)
 {
-       struct wireless_dev *wdev = wil->wdev;
        struct wil6210_rtap {
                struct ieee80211_radiotap_header rthdr;
                /* fields should be in the order of bits in rthdr.it_present */
@@ -374,7 +374,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
        int rtap_len = sizeof(struct wil6210_rtap);
        int phy_length = 0; /* phy info header size, bytes */
        static char phy_data[128];
-       struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+       struct ieee80211_channel *ch = wil->monitor_chandef.chan;
 
        if (rtap_include_phy_info) {
                rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
index cf27d97..0a0766b 100644 (file)
@@ -161,6 +161,10 @@ struct RGF_ICR {
 #define RGF_USER_USAGE_6               (0x880018)
        #define BIT_USER_OOB_MODE               BIT(31)
        #define BIT_USER_OOB_R2_MODE            BIT(30)
+#define RGF_USER_USAGE_8               (0x880020)
+       #define BIT_USER_PREVENT_DEEP_SLEEP     BIT(0)
+       #define BIT_USER_SUPPORT_T_POWER_ON_0   BIT(1)
+       #define BIT_USER_EXT_CLK                BIT(2)
 #define RGF_USER_HW_MACHINE_STATE      (0x8801dc)
        #define HW_MACHINE_BOOT_DONE    (0x3fffffd)
 #define RGF_USER_USER_CPU_0            (0x8801e0)
@@ -435,12 +439,13 @@ enum { /* for wil6210_priv.status */
        wil_status_fwconnected,
        wil_status_dontscan,
        wil_status_mbox_ready, /* MBOX structures ready */
-       wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+       wil_status_irqen, /* interrupts enabled - for debug */
        wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
        wil_status_resetting, /* reset in progress */
        wil_status_suspending, /* suspend in progress */
        wil_status_suspended, /* suspend completed, device is suspended */
        wil_status_resuming, /* resume in progress */
+       wil_status_collecting_dumps, /* crashdump collection in progress */
        wil_status_last /* keep last */
 };
 
@@ -643,12 +648,14 @@ struct wil6210_priv {
        const char *wil_fw_name;
        DECLARE_BITMAP(hw_capabilities, hw_capability_last);
        DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
+       DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX);
        u8 n_mids; /* number of additional MIDs as reported by FW */
        u32 recovery_count; /* num of FW recovery attempts in a short time */
        u32 recovery_state; /* FW recovery state machine */
        unsigned long last_fw_recovery; /* jiffies of last fw recovery */
        wait_queue_head_t wq; /* for all wait_event() use */
        /* profile */
+       struct cfg80211_chan_def monitor_chandef;
        u32 monitor_flags;
        u32 privacy; /* secure connection? */
        u8 hidden_ssid; /* relevant in AP mode */
@@ -704,7 +711,7 @@ struct wil6210_priv {
        struct wil_sta_info sta[WIL6210_MAX_CID];
        int bcast_vring;
        u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once  */
-       bool use_extended_dma_addr; /* indicates whether we are using 48 bits */
+       u32 dma_addr_size; /* indicates dma addr size */
        /* scan */
        struct cfg80211_scan_request *scan_request;
 
@@ -742,9 +749,7 @@ struct wil6210_priv {
 
        int fw_calib_result;
 
-#ifdef CONFIG_PM
        struct notifier_block pm_notify;
-#endif /* CONFIG_PM */
 
        bool suspend_resp_rcvd;
        bool suspend_resp_comp;
@@ -1032,4 +1037,8 @@ void wil_halp_unvote(struct wil6210_priv *wil);
 void wil6210_set_halp(struct wil6210_priv *wil);
 void wil6210_clear_halp(struct wil6210_priv *wil);
 
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+                        struct cfg80211_sched_scan_request *request);
+int wmi_stop_sched_scan(struct wil6210_priv *wil);
+
 #endif /* __WIL6210_H__ */
index e53cf0c..1ed3306 100644 (file)
@@ -72,6 +72,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
                return -EINVAL;
        }
 
+       set_bit(wil_status_collecting_dumps, wil->status);
+       if (test_bit(wil_status_suspending, wil->status) ||
+           test_bit(wil_status_suspended, wil->status) ||
+           test_bit(wil_status_resetting, wil->status)) {
+               wil_err(wil, "cannot collect fw dump during suspend/reset\n");
+               clear_bit(wil_status_collecting_dumps, wil->status);
+               return -EINVAL;
+       }
+
        /* copy to crash dump area */
        for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
                map = &fw_mapping[i];
@@ -91,6 +100,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
                                     (const void __iomem * __force)data, len);
        }
 
+       clear_bit(wil_status_collecting_dumps, wil->status);
+
        return 0;
 }
 
index 5d9e4bf..177026e 100644 (file)
@@ -27,6 +27,18 @@ enum wil_platform_event {
        WIL_PLATFORM_EVT_POST_SUSPEND = 4,
 };
 
+enum wil_platform_features {
+       WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
+       WIL_PLATFORM_FEATURE_MAX,
+};
+
+enum wil_platform_capa {
+       WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND = 0,
+       WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
+       WIL_PLATFORM_CAPA_EXT_CLK = 2,
+       WIL_PLATFORM_CAPA_MAX,
+};
+
 /**
  * struct wil_platform_ops - wil platform module calls from this
  * driver to platform driver
@@ -37,7 +49,8 @@ struct wil_platform_ops {
        int (*resume)(void *handle, bool device_powered_on);
        void (*uninit)(void *handle);
        int (*notify)(void *handle, enum wil_platform_event evt);
-       bool (*keep_radio_on_during_sleep)(void *handle);
+       int (*get_capa)(void *handle);
+       void (*set_features)(void *handle, int features);
 };
 
 /**
index 8ace618..2ab71bb 100644 (file)
@@ -38,6 +38,7 @@ MODULE_PARM_DESC(led_id,
                 " 60G device led enablement. Set the led ID (0-2) to enable");
 
 #define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+#define WIL_WMI_CALL_GENERAL_TO_MS 100
 
 /**
  * WMI event receiving - theory of operations
@@ -314,6 +315,10 @@ static const char *cmdid2name(u16 cmdid)
                return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
        case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
                return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
+       case WMI_START_SCHED_SCAN_CMDID:
+               return "WMI_START_SCHED_SCAN_CMD";
+       case WMI_STOP_SCHED_SCAN_CMDID:
+               return "WMI_STOP_SCHED_SCAN_CMD";
        default:
                return "Untracked CMD";
        }
@@ -428,6 +433,12 @@ static const char *eventid2name(u16 eventid)
                return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
        case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
                return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
+       case WMI_START_SCHED_SCAN_EVENTID:
+               return "WMI_START_SCHED_SCAN_EVENT";
+       case WMI_STOP_SCHED_SCAN_EVENTID:
+               return "WMI_STOP_SCHED_SCAN_EVENT";
+       case WMI_SCHED_SCAN_RESULT_EVENTID:
+               return "WMI_SCHED_SCAN_RESULT_EVENT";
        default:
                return "Untracked EVENT";
        }
@@ -802,8 +813,6 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
                }
        }
 
-       /* FIXME FW can transmit only ucast frames to peer */
-       /* FIXME real ring_id instead of hard coded 0 */
        ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
        wil->sta[evt->cid].status = wil_sta_conn_pending;
 
@@ -1066,6 +1075,75 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        spin_unlock_bh(&sta->tid_rx_lock);
 }
 
+/* Handle WMI_SCHED_SCAN_RESULT_EVENTID: validate the probe response
+ * embedded in the event payload and report it to cfg80211 as a PNO
+ * (scheduled scan) result.
+ */
+static void
+wmi_evt_sched_scan_result(struct wil6210_priv *wil, int id, void *d, int len)
+{
+       struct wmi_sched_scan_result_event *data = d;
+       struct wiphy *wiphy = wil_to_wiphy(wil);
+       struct ieee80211_mgmt *rx_mgmt_frame =
+               (struct ieee80211_mgmt *)data->payload;
+       int flen = len - offsetof(struct wmi_sched_scan_result_event, payload);
+       int ch_no;
+       u32 freq;
+       struct ieee80211_channel *channel;
+       s32 signal;
+       __le16 fc;
+       u32 d_len;
+       struct cfg80211_bss *bss;
+
+       if (flen < 0) {
+               wil_err(wil, "sched scan result event too short, len %d\n",
+                       len);
+               return;
+       }
+
+       /* firmware-reported payload length must match the event size */
+       d_len = le32_to_cpu(data->info.len);
+       if (d_len != flen) {
+               wil_err(wil,
+                       "sched scan result length mismatch, d_len %d should be %d\n",
+                       d_len, flen);
+               return;
+       }
+
+       fc = rx_mgmt_frame->frame_control;
+       if (!ieee80211_is_probe_resp(fc)) {
+               /* convert from LE before printing so the value is
+                * correct on big-endian hosts as well
+                */
+               wil_err(wil, "sched scan result invalid frame, fc 0x%04x\n",
+                       le16_to_cpu(fc));
+               return;
+       }
+
+       ch_no = data->info.channel + 1;
+       freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
+       channel = ieee80211_get_channel(wiphy, freq);
+       if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+               signal = 100 * data->info.rssi;
+       else
+               signal = data->info.sqi;
+
+       wil_dbg_wmi(wil, "sched scan result: channel %d MCS %d RSSI %d\n",
+                   data->info.channel, data->info.mcs, data->info.rssi);
+       wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
+                   d_len, data->info.qid, data->info.mid, data->info.cid);
+       wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+                        d_len, true);
+
+       if (!channel) {
+               wil_err(wil, "Frame on unsupported channel\n");
+               return;
+       }
+
+       bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
+                                       d_len, signal, GFP_KERNEL);
+       if (bss) {
+               wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
+               cfg80211_put_bss(wiphy, bss);
+       } else {
+               wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
+       }
+
+       cfg80211_sched_scan_results(wiphy, 0);
+}
+
 /**
  * Some events are ignored for purpose; and need not be interpreted as
  * "unhandled events"
@@ -1093,6 +1171,7 @@ static const struct {
        {WMI_DELBA_EVENTID,             wmi_evt_delba},
        {WMI_VRING_EN_EVENTID,          wmi_evt_vring_en},
        {WMI_DATA_PORT_OPEN_EVENTID,            wmi_evt_ignore},
+       {WMI_SCHED_SCAN_RESULT_EVENTID,         wmi_evt_sched_scan_result},
 };
 
 /*
@@ -1703,7 +1782,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
        int rc;
 
        if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
-               struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+               struct ieee80211_channel *ch = wil->monitor_chandef.chan;
 
                cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
                if (ch)
@@ -2284,3 +2363,159 @@ out:
        spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
        return rc;
 }
+
+/* Fill the SSID match list of a WMI_START_SCHED_SCAN command from the
+ * cfg80211 match sets; SSIDs also present in the probe list are flagged
+ * for active probing.
+ */
+static void
+wmi_sched_scan_set_ssids(struct wil6210_priv *wil,
+                        struct wmi_start_sched_scan_cmd *cmd,
+                        struct cfg80211_ssid *ssids, int n_ssids,
+                        struct cfg80211_match_set *match_sets,
+                        int n_match_sets)
+{
+       int i;
+
+       if (n_match_sets > WMI_MAX_PNO_SSID_NUM) {
+               wil_dbg_wmi(wil, "too many match sets (%d), use first %d\n",
+                           n_match_sets, WMI_MAX_PNO_SSID_NUM);
+               n_match_sets = WMI_MAX_PNO_SSID_NUM;
+       }
+       cmd->num_of_ssids = n_match_sets;
+
+       for (i = 0; i < n_match_sets; i++) {
+               struct wmi_sched_scan_ssid_match *wmi_match =
+                       &cmd->ssid_for_match[i];
+               struct cfg80211_match_set *cfg_match = &match_sets[i];
+               int j;
+
+               /* cap the reported length to what is actually copied so
+                * firmware never sees a length larger than the buffer
+                */
+               wmi_match->ssid_len = min_t(u8, cfg_match->ssid.ssid_len,
+                                           WMI_MAX_SSID_LEN);
+               memcpy(wmi_match->ssid, cfg_match->ssid.ssid,
+                      wmi_match->ssid_len);
+               wmi_match->rssi_threshold = S8_MIN;
+               if (cfg_match->rssi_thold >= S8_MIN &&
+                   cfg_match->rssi_thold <= S8_MAX)
+                       wmi_match->rssi_threshold = cfg_match->rssi_thold;
+
+               for (j = 0; j < n_ssids; j++)
+                       if (wmi_match->ssid_len == ssids[j].ssid_len &&
+                           memcmp(wmi_match->ssid, ssids[j].ssid,
+                                  wmi_match->ssid_len) == 0)
+                               wmi_match->add_ssid_to_probe = true;
+       }
+}
+
+static void
+wmi_sched_scan_set_channels(struct wil6210_priv *wil,
+                           struct wmi_start_sched_scan_cmd *cmd,
+                           u32 n_channels,
+                           struct ieee80211_channel **channels)
+{
+       int i;
+
+       if (n_channels > WMI_MAX_CHANNEL_NUM) {
+               wil_dbg_wmi(wil, "too many channels (%d), use first %d\n",
+                           n_channels, WMI_MAX_CHANNEL_NUM);
+               n_channels = WMI_MAX_CHANNEL_NUM;
+       }
+       cmd->num_of_channels = n_channels;
+
+       for (i = 0; i < n_channels; i++) {
+               struct ieee80211_channel *cfg_chan = channels[i];
+
+               cmd->channel_list[i] = cfg_chan->hw_value - 1;
+       }
+}
+
+static void
+wmi_sched_scan_set_plans(struct wil6210_priv *wil,
+                        struct wmi_start_sched_scan_cmd *cmd,
+                        struct cfg80211_sched_scan_plan *scan_plans,
+                        int n_scan_plans)
+{
+       int i;
+
+       if (n_scan_plans > WMI_MAX_PLANS_NUM) {
+               wil_dbg_wmi(wil, "too many plans (%d), use first %d\n",
+                           n_scan_plans, WMI_MAX_PLANS_NUM);
+               n_scan_plans = WMI_MAX_PLANS_NUM;
+       }
+
+       for (i = 0; i < n_scan_plans; i++) {
+               struct cfg80211_sched_scan_plan *cfg_plan = &scan_plans[i];
+
+               cmd->scan_plans[i].interval_sec =
+                       cpu_to_le16(cfg_plan->interval);
+               cmd->scan_plans[i].num_of_iterations =
+                       cpu_to_le16(cfg_plan->iterations);
+       }
+}
+
+int wmi_start_sched_scan(struct wil6210_priv *wil,
+                        struct cfg80211_sched_scan_request *request)
+{
+       int rc;
+       struct wmi_start_sched_scan_cmd cmd = {
+               .min_rssi_threshold = S8_MIN,
+               .initial_delay_sec = cpu_to_le16(request->delay),
+       };
+       struct {
+               struct wmi_cmd_hdr wmi;
+               struct wmi_start_sched_scan_event evt;
+       } __packed reply;
+
+       if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+               return -ENOTSUPP;
+
+       if (request->min_rssi_thold >= S8_MIN &&
+           request->min_rssi_thold <= S8_MAX)
+               cmd.min_rssi_threshold = request->min_rssi_thold;
+
+       wmi_sched_scan_set_ssids(wil, &cmd, request->ssids, request->n_ssids,
+                                request->match_sets, request->n_match_sets);
+       wmi_sched_scan_set_channels(wil, &cmd,
+                                   request->n_channels, request->channels);
+       wmi_sched_scan_set_plans(wil, &cmd,
+                                request->scan_plans, request->n_scan_plans);
+
+       reply.evt.result = WMI_PNO_REJECT;
+
+       rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, &cmd, sizeof(cmd),
+                     WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+                     WIL_WMI_CALL_GENERAL_TO_MS);
+       if (rc)
+               return rc;
+
+       if (reply.evt.result != WMI_PNO_SUCCESS) {
+               wil_err(wil, "start sched scan failed, result %d\n",
+                       reply.evt.result);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int wmi_stop_sched_scan(struct wil6210_priv *wil)
+{
+       int rc;
+       struct {
+               struct wmi_cmd_hdr wmi;
+               struct wmi_stop_sched_scan_event evt;
+       } __packed reply;
+
+       if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
+               return -ENOTSUPP;
+
+       reply.evt.result = WMI_PNO_REJECT;
+
+       rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, NULL, 0,
+                     WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
+                     WIL_WMI_CALL_GENERAL_TO_MS);
+       if (rc)
+               return rc;
+
+       if (reply.evt.result != WMI_PNO_SUCCESS) {
+               wil_err(wil, "stop sched scan failed, result %d\n",
+                       reply.evt.result);
+               return -EINVAL;
+       }
+
+       return 0;
+}
index d9e220a..d3e75f0 100644 (file)
@@ -71,6 +71,8 @@ enum wmi_fw_capability {
        WMI_FW_CAPABILITY_RSSI_REPORTING                = 12,
        WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE         = 13,
        WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP       = 14,
+       WMI_FW_CAPABILITY_PNO                           = 15,
+       WMI_FW_CAPABILITY_REF_CLOCK_CONTROL             = 18,
        WMI_FW_CAPABILITY_MAX,
 };
 
@@ -87,6 +89,8 @@ enum wmi_command_id {
        WMI_CONNECT_CMDID                               = 0x01,
        WMI_DISCONNECT_CMDID                            = 0x03,
        WMI_DISCONNECT_STA_CMDID                        = 0x04,
+       WMI_START_SCHED_SCAN_CMDID                      = 0x05,
+       WMI_STOP_SCHED_SCAN_CMDID                       = 0x06,
        WMI_START_SCAN_CMDID                            = 0x07,
        WMI_SET_BSS_FILTER_CMDID                        = 0x09,
        WMI_SET_PROBED_SSID_CMDID                       = 0x0A,
@@ -385,6 +389,38 @@ struct wmi_start_scan_cmd {
        } channel_list[0];
 } __packed;
 
+#define WMI_MAX_PNO_SSID_NUM   (16)
+#define WMI_MAX_CHANNEL_NUM    (6)
+#define WMI_MAX_PLANS_NUM      (2)
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_ssid_match {
+       u8 ssid_len;
+       u8 ssid[WMI_MAX_SSID_LEN];
+       s8 rssi_threshold;
+       /* boolean */
+       u8 add_ssid_to_probe;
+       u8 reserved;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_sched_scan_plan {
+       __le16 interval_sec;
+       __le16 num_of_iterations;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_CMDID */
+struct wmi_start_sched_scan_cmd {
+       struct wmi_sched_scan_ssid_match ssid_for_match[WMI_MAX_PNO_SSID_NUM];
+       u8 num_of_ssids;
+       s8 min_rssi_threshold;
+       u8 channel_list[WMI_MAX_CHANNEL_NUM];
+       u8 num_of_channels;
+       u8 reserved;
+       __le16 initial_delay_sec;
+       struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
+} __packed;
+
 /* WMI_SET_PROBED_SSID_CMDID */
 #define MAX_PROBED_SSID_INDEX  (3)
 
@@ -1238,6 +1274,9 @@ enum wmi_event_id {
        WMI_READY_EVENTID                               = 0x1001,
        WMI_CONNECT_EVENTID                             = 0x1002,
        WMI_DISCONNECT_EVENTID                          = 0x1003,
+       WMI_START_SCHED_SCAN_EVENTID                    = 0x1005,
+       WMI_STOP_SCHED_SCAN_EVENTID                     = 0x1006,
+       WMI_SCHED_SCAN_RESULT_EVENTID                   = 0x1007,
        WMI_SCAN_COMPLETE_EVENTID                       = 0x100A,
        WMI_REPORT_STATISTICS_EVENTID                   = 0x100B,
        WMI_RD_MEM_RSP_EVENTID                          = 0x1800,
@@ -1600,6 +1639,49 @@ struct wmi_scan_complete_event {
        __le32 status;
 } __packed;
 
+/* wmi_rx_mgmt_info */
+struct wmi_rx_mgmt_info {
+       u8 mcs;
+       s8 rssi;
+       u8 range;
+       u8 sqi;
+       __le16 stype;
+       __le16 status;
+       __le32 len;
+       /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
+       u8 qid;
+       /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
+       u8 mid;
+       u8 cid;
+       /* From Radio MNGR */
+       u8 channel;
+} __packed;
+
+/* WMI_START_SCHED_SCAN_EVENTID */
+enum wmi_pno_result {
+       WMI_PNO_SUCCESS                 = 0x00,
+       WMI_PNO_REJECT                  = 0x01,
+       WMI_PNO_INVALID_PARAMETERS      = 0x02,
+       WMI_PNO_NOT_ENABLED             = 0x03,
+};
+
+struct wmi_start_sched_scan_event {
+       /* pno_result */
+       u8 result;
+       u8 reserved[3];
+} __packed;
+
+struct wmi_stop_sched_scan_event {
+       /* pno_result */
+       u8 result;
+       u8 reserved[3];
+} __packed;
+
+struct wmi_sched_scan_result_event {
+       struct wmi_rx_mgmt_info info;
+       u8 payload[0];
+} __packed;
+
 /* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
 enum wmi_acs_info_bitmask {
        WMI_ACS_INFO_BITMASK_BEACON_FOUND       = 0x01,
@@ -1814,24 +1896,6 @@ struct wmi_get_ssid_event {
        u8 ssid[WMI_MAX_SSID_LEN];
 } __packed;
 
-/* wmi_rx_mgmt_info */
-struct wmi_rx_mgmt_info {
-       u8 mcs;
-       s8 rssi;
-       u8 range;
-       u8 sqi;
-       __le16 stype;
-       __le16 status;
-       __le32 len;
-       /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
-       u8 qid;
-       /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */
-       u8 mid;
-       u8 cid;
-       /* From Radio MNGR */
-       u8 channel;
-} __packed;
-
 /* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */
 struct wmi_rf_xpm_read_result_event {
        /* enum wmi_fw_status_e - success=0 or fail=1 */
index f8b47c1..e3366ab 100644 (file)
@@ -149,7 +149,8 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
 
                /* must configure SDIO_CCCR_IENx to enable irq */
                data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
-               data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
+               data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 |
+                       SDIO_CCCR_IEN_FUNC0;
                brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);
 
                /* redirect, configure and enable io for interrupt signal */
@@ -291,8 +292,9 @@ out:
                *ret = retval;
 }
 
-static int brcmf_sdiod_buff_read(struct brcmf_sdio_dev *sdiodev, uint fn,
-                                u32 addr, struct sk_buff *pkt)
+static int brcmf_sdiod_buff_read(struct brcmf_sdio_dev *sdiodev,
+                                struct sdio_func *func, u32 addr,
+                                struct sk_buff *pkt)
 {
        unsigned int req_sz;
        int err;
@@ -301,13 +303,19 @@ static int brcmf_sdiod_buff_read(struct brcmf_sdio_dev *sdiodev, uint fn,
        req_sz = pkt->len + 3;
        req_sz &= (uint)~3;
 
-       if (fn == 1)
-               err = sdio_memcpy_fromio(sdiodev->func[fn],
-                                        ((u8 *)(pkt->data)), addr, req_sz);
-       else
-               /* function 2 read is FIFO operation */
-               err = sdio_readsb(sdiodev->func[fn],
-                                 ((u8 *)(pkt->data)), addr, req_sz);
+       switch (func->num) {
+       case 1:
+               err = sdio_memcpy_fromio(func, ((u8 *)(pkt->data)), addr,
+                                        req_sz);
+               break;
+       case 2:
+               err = sdio_readsb(func, ((u8 *)(pkt->data)), addr, req_sz);
+               break;
+       default:
+               /* bail out as things are really fishy here */
+               WARN(1, "invalid sdio function number: %d\n", func->num);
+               err = -ENOMEDIUM;
+       };
 
        if (err == -ENOMEDIUM)
                brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
@@ -315,8 +323,9 @@ static int brcmf_sdiod_buff_read(struct brcmf_sdio_dev *sdiodev, uint fn,
        return err;
 }
 
-static int brcmf_sdiod_buff_write(struct brcmf_sdio_dev *sdiodev, uint fn,
-                                 u32 addr, struct sk_buff *pkt)
+static int brcmf_sdiod_buff_write(struct brcmf_sdio_dev *sdiodev,
+                                 struct sdio_func *func, u32 addr,
+                                 struct sk_buff *pkt)
 {
        unsigned int req_sz;
        int err;
@@ -325,8 +334,7 @@ static int brcmf_sdiod_buff_write(struct brcmf_sdio_dev *sdiodev, uint fn,
        req_sz = pkt->len + 3;
        req_sz &= (uint)~3;
 
-       err = sdio_memcpy_toio(sdiodev->func[fn], addr,
-                              ((u8 *)(pkt->data)), req_sz);
+       err = sdio_memcpy_toio(func, addr, ((u8 *)(pkt->data)), req_sz);
 
        if (err == -ENOMEDIUM)
                brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
@@ -337,7 +345,7 @@ static int brcmf_sdiod_buff_write(struct brcmf_sdio_dev *sdiodev, uint fn,
 /**
  * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
  * @sdiodev: brcmfmac sdio device
- * @fn: SDIO function number
+ * @func: SDIO function
  * @write: direction flag
  * @addr: dongle memory address as source/destination
  * @pkt: skb pointer
@@ -346,7 +354,8 @@ static int brcmf_sdiod_buff_write(struct brcmf_sdio_dev *sdiodev, uint fn,
  * stack for block data access. It assumes that the skb passed down by the
  * caller has already been padded and aligned.
  */
-static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
+                                struct sdio_func *func,
                                 bool write, u32 addr,
                                 struct sk_buff_head *pktlist)
 {
@@ -372,7 +381,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
                req_sz = 0;
                skb_queue_walk(pktlist, pkt_next)
                        req_sz += pkt_next->len;
-               req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
+               req_sz = ALIGN(req_sz, func->cur_blksize);
                while (req_sz > PAGE_SIZE) {
                        pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
                        if (pkt_next == NULL) {
@@ -391,7 +400,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
                target_list = &local_list;
        }
 
-       func_blk_sz = sdiodev->func[fn]->cur_blksize;
+       func_blk_sz = func->cur_blksize;
        max_req_sz = sdiodev->max_request_size;
        max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
                            target_list->qlen);
@@ -408,10 +417,10 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
        mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
        mmc_cmd.opcode = SD_IO_RW_EXTENDED;
        mmc_cmd.arg = write ? 1<<31 : 0;        /* write flag  */
-       mmc_cmd.arg |= (fn & 0x7) << 28;        /* SDIO func num */
-       mmc_cmd.arg |= 1<<27;                   /* block mode */
+       mmc_cmd.arg |= (func->num & 0x7) << 28; /* SDIO func num */
+       mmc_cmd.arg |= 1 << 27;                 /* block mode */
        /* for function 1 the addr will be incremented */
-       mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+       mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
        mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
        mmc_req.cmd = &mmc_cmd;
        mmc_req.data = &mmc_dat;
@@ -457,11 +466,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
                mmc_cmd.arg |= (addr & 0x1FFFF) << 9;   /* address */
                mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;  /* block count */
                /* incrementing addr for function 1 */
-               if (fn == 1)
+               if (func->num == 1)
                        addr += req_sz;
 
-               mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
-               mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
+               mmc_set_data_timeout(&mmc_dat, func->card);
+               mmc_wait_for_req(func->card->host, &mmc_req);
 
                ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
                if (ret == -ENOMEDIUM) {
@@ -529,7 +538,7 @@ int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
 
 int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
 {
-       u32 addr = sdiodev->sbwad;
+       u32 addr = sdiodev->cc_core->base;
        int err = 0;
 
        brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
@@ -541,7 +550,7 @@ int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
        addr &= SBSDIO_SB_OFT_ADDR_MASK;
        addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
-       err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr, pkt);
+       err = brcmf_sdiod_buff_read(sdiodev, sdiodev->func[2], addr, pkt);
 
 done:
        return err;
@@ -552,7 +561,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
 {
        struct sk_buff *glom_skb = NULL;
        struct sk_buff *skb;
-       u32 addr = sdiodev->sbwad;
+       u32 addr = sdiodev->cc_core->base;
        int err = 0;
 
        brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
@@ -566,13 +575,13 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
        addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
        if (pktq->qlen == 1)
-               err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr,
+               err = brcmf_sdiod_buff_read(sdiodev, sdiodev->func[2], addr,
                                            pktq->next);
        else if (!sdiodev->sg_support) {
                glom_skb = brcmu_pkt_buf_get_skb(totlen);
                if (!glom_skb)
                        return -ENOMEM;
-               err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr,
+               err = brcmf_sdiod_buff_read(sdiodev, sdiodev->func[2], addr,
                                            glom_skb);
                if (err)
                        goto done;
@@ -582,8 +591,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
                        skb_pull(glom_skb, skb->len);
                }
        } else
-               err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
-                                           pktq);
+               err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func[2], false,
+                                           addr, pktq);
 
 done:
        brcmu_pkt_buf_free_skb(glom_skb);
@@ -593,7 +602,7 @@ done:
 int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
 {
        struct sk_buff *mypkt;
-       u32 addr = sdiodev->sbwad;
+       u32 addr = sdiodev->cc_core->base;
        int err;
 
        mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -614,7 +623,8 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
        addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
        if (!err)
-               err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_2, addr, mypkt);
+               err = brcmf_sdiod_buff_write(sdiodev, sdiodev->func[2], addr,
+                                            mypkt);
 
        brcmu_pkt_buf_free_skb(mypkt);
 
@@ -625,7 +635,7 @@ int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
                         struct sk_buff_head *pktq)
 {
        struct sk_buff *skb;
-       u32 addr = sdiodev->sbwad;
+       u32 addr = sdiodev->cc_core->base;
        int err;
 
        brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
@@ -639,14 +649,14 @@ int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
 
        if (pktq->qlen == 1 || !sdiodev->sg_support) {
                skb_queue_walk(pktq, skb) {
-                       err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_2,
+                       err = brcmf_sdiod_buff_write(sdiodev, sdiodev->func[2],
                                                     addr, skb);
                        if (err)
                                break;
                }
        } else {
-               err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
-                                           pktq);
+               err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func[2], true,
+                                           addr, pktq);
        }
 
        return err;
@@ -696,10 +706,10 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 
                if (write) {
                        memcpy(pkt->data, data, dsize);
-                       err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_1,
+                       err = brcmf_sdiod_buff_write(sdiodev, sdiodev->func[1],
                                                     sdaddr, pkt);
                } else {
-                       err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_1,
+                       err = brcmf_sdiod_buff_read(sdiodev, sdiodev->func[1],
                                                    sdaddr, pkt);
                }
 
@@ -728,12 +738,12 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
        return err;
 }
 
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, u8 fn)
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func)
 {
        brcmf_dbg(SDIO, "Enter\n");
 
        /* Issue abort cmd52 command through F0 */
-       brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, fn, NULL);
+       brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);
 
        brcmf_dbg(SDIO, "Exit\n");
        return 0;
@@ -995,6 +1005,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
        brcmf_dbg(SDIO, "Function#: %d\n", func->num);
 
        dev = &func->dev;
+
+       /* Set MMC_QUIRK_LENIENT_FN0 for this card */
+       func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+
        /* prohibit ACPI power management for this device */
        brcmf_sdiod_acpi_set_power_manageable(dev, 0);
 
@@ -1018,8 +1032,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
        /* store refs to functions used. mmc_card does
         * not hold the F0 function pointer.
         */
-       sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
-       sdiodev->func[0]->num = 0;
+       sdiodev->func[0] = NULL;
        sdiodev->func[1] = func->card->sdio_func[0];
        sdiodev->func[2] = func;
 
@@ -1045,7 +1058,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 fail:
        dev_set_drvdata(&func->dev, NULL);
        dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
-       kfree(sdiodev->func[0]);
        kfree(sdiodev);
        kfree(bus_if);
        return err;
@@ -1078,7 +1090,6 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
                dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
 
                kfree(bus_if);
-               kfree(sdiodev->func[0]);
                kfree(sdiodev);
        }
 
@@ -1104,7 +1115,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
 
        func = container_of(dev, struct sdio_func, dev);
        brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
-       if (func->num != SDIO_FUNC_1)
+       if (func->num != 1)
                return 0;
 
 
@@ -1133,7 +1144,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
        struct sdio_func *func = container_of(dev, struct sdio_func, dev);
 
        brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
-       if (func->num != SDIO_FUNC_2)
+       if (func->num != 2)
                return 0;
 
        brcmf_sdiod_freezer_off(sdiodev);
index c5d1a1c..f7b30ce 100644 (file)
@@ -1338,6 +1338,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
        switch (pub->chip) {
        case BRCM_CC_4354_CHIP_ID:
        case BRCM_CC_4356_CHIP_ID:
+       case BRCM_CC_4345_CHIP_ID:
                /* explicitly check SR engine enable bit */
                pmu_cc3_mask = BIT(2);
                /* fall-through */
index 5cc2d69..63bb1ab 100644 (file)
@@ -660,30 +660,6 @@ static bool data_ok(struct brcmf_sdio *bus)
               ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
 }
 
-/*
- * Reads a register in the SDIO hardware block. This block occupies a series of
- * adresses on the 32 bit backplane bus.
- */
-static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
-{
-       struct brcmf_core *core = bus->sdio_core;
-       int ret;
-
-       *regvar = brcmf_sdiod_readl(bus->sdiodev, core->base + offset, &ret);
-
-       return ret;
-}
-
-static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
-{
-       struct brcmf_core *core = bus->sdio_core;
-       int ret;
-
-       brcmf_sdiod_writel(bus->sdiodev, core->base + reg_offset, regval, &ret);
-
-       return ret;
-}
-
 static int
 brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 {
@@ -1078,6 +1054,8 @@ static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
 
 static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
 {
+       struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+       struct brcmf_core *core = bus->sdio_core;
        u32 intstatus = 0;
        u32 hmb_data;
        u8 fcbits;
@@ -1086,10 +1064,14 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
        brcmf_dbg(SDIO, "Enter\n");
 
        /* Read mailbox data and ack that we did so */
-       ret = r_sdreg32(bus, &hmb_data, SD_REG(tohostmailboxdata));
+       hmb_data = brcmf_sdiod_readl(sdiod,
+                                    core->base + SD_REG(tohostmailboxdata),
+                                    &ret);
+
+       if (!ret)
+               brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
+                                  SMB_INT_ACK, &ret);
 
-       if (ret == 0)
-               w_sdreg32(bus, SMB_INT_ACK, SD_REG(tosbmailbox));
        bus->sdcnt.f1regdata += 2;
 
        /* dongle indicates the firmware has halted/crashed */
@@ -1163,6 +1145,8 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
 
 static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 {
+       struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+       struct brcmf_core *core = bus->sdio_core;
        uint retries = 0;
        u16 lastrbc;
        u8 hi, lo;
@@ -1173,7 +1157,7 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
                  rtx ? ", send NAK" : "");
 
        if (abort)
-               brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
+               brcmf_sdiod_abort(bus->sdiodev, bus->sdiodev->func[2]);
 
        brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM,
                           &err);
@@ -1204,7 +1188,8 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 
        if (rtx) {
                bus->sdcnt.rxrtx++;
-               err = w_sdreg32(bus, SMB_NAK, SD_REG(tosbmailbox));
+               brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
+                                  SMB_NAK, &err);
 
                bus->sdcnt.f1regdata++;
                if (err == 0)
@@ -1224,7 +1209,7 @@ static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
        brcmf_err("sdio error, abort command and terminate frame\n");
        bus->sdcnt.tx_sderrs++;
 
-       brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
+       brcmf_sdiod_abort(sdiodev, sdiodev->func[2]);
        brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
        bus->sdcnt.f1regdata++;
 
@@ -2087,7 +2072,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
        int ntail, ret;
 
        sdiodev = bus->sdiodev;
-       blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+       blksize = sdiodev->func[2]->cur_blksize;
        /* sg entry alignment should be a divisor of block size */
        WARN_ON(blksize % bus->sgentry_align);
 
@@ -2291,6 +2276,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
 {
        struct sk_buff *pkt;
        struct sk_buff_head pktq;
+       u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
        u32 intstatus = 0;
        int ret = 0, prec_out, i;
        uint cnt = 0;
@@ -2329,7 +2315,8 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
                if (!bus->intr) {
                        /* Check device status, signal pending interrupt */
                        sdio_claim_host(bus->sdiodev->func[1]);
-                       ret = r_sdreg32(bus, &intstatus, SD_REG(intstatus));
+                       intstatus = brcmf_sdiod_readl(bus->sdiodev,
+                                                     intstat_addr, &ret);
                        sdio_release_host(bus->sdiodev->func[1]);
                        bus->sdcnt.f2txdata++;
                        if (ret != 0)
@@ -2413,12 +2400,13 @@ static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
 
 static void brcmf_sdio_bus_stop(struct device *dev)
 {
-       u32 local_hostintmask;
-       u8 saveclk;
-       int err;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
+       struct brcmf_core *core = bus->sdio_core;
+       u32 local_hostintmask;
+       u8 saveclk;
+       int err;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2435,7 +2423,9 @@ static void brcmf_sdio_bus_stop(struct device *dev)
                brcmf_sdio_bus_sleep(bus, false, false);
 
                /* Disable and clear interrupts at the chip level also */
-               w_sdreg32(bus, 0, SD_REG(hostintmask));
+               brcmf_sdiod_writel(sdiodev, core->base + SD_REG(hostintmask),
+                                  0, NULL);
+
                local_hostintmask = bus->hostintmask;
                bus->hostintmask = 0;
 
@@ -2451,10 +2441,11 @@ static void brcmf_sdio_bus_stop(struct device *dev)
 
                /* Turn off the bus (F2), free any pending packets */
                brcmf_dbg(INTR, "disable SDIO interrupts\n");
-               sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+               sdio_disable_func(sdiodev->func[2]);
 
                /* Clear any pending interrupts now that F2 is disabled */
-               w_sdreg32(bus, local_hostintmask, SD_REG(intstatus));
+               brcmf_sdiod_writel(sdiodev, core->base + SD_REG(intstatus),
+                                  local_hostintmask, NULL);
 
                sdio_release_host(sdiodev->func[1]);
        }
@@ -2494,12 +2485,12 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
 
 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 {
-       struct brcmf_core *buscore = bus->sdio_core;
+       struct brcmf_core *core = bus->sdio_core;
        u32 addr;
        unsigned long val;
        int ret;
 
-       addr = buscore->base + SD_REG(intstatus);
+       addr = core->base + SD_REG(intstatus);
 
        val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret);
        bus->sdcnt.f1regdata++;
@@ -2521,7 +2512,9 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 
 static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 {
+       struct brcmf_sdio_dev *sdiod = bus->sdiodev;
        u32 newstatus = 0;
+       u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
        unsigned long intstatus;
        uint txlimit = bus->txbound;    /* Tx frames to send before resched */
        uint framecnt;                  /* Temporary counter of tx/rx frames */
@@ -2576,9 +2569,10 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
         */
        if (intstatus & I_HMB_FC_CHANGE) {
                intstatus &= ~I_HMB_FC_CHANGE;
-               err = w_sdreg32(bus, I_HMB_FC_CHANGE, SD_REG(intstatus));
+               brcmf_sdiod_writel(sdiod, intstat_addr, I_HMB_FC_CHANGE, &err);
+
+               newstatus = brcmf_sdiod_readl(sdiod, intstat_addr, &err);
 
-               err = r_sdreg32(bus, &newstatus, SD_REG(intstatus));
                bus->sdcnt.f1regdata += 2;
                atomic_set(&bus->fcstate,
                           !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
@@ -3777,15 +3771,17 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
        u32 val, rev;
 
        val = brcmf_sdiod_readl(sdiodev, addr, NULL);
-       if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 ||
-            sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) &&
-           addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
+
+       if (addr == CORE_CC_REG(SI_ENUM_BASE, chipid) &&
+           (sdiodev->func[1]->device == SDIO_DEVICE_ID_BROADCOM_4339 ||
+            sdiodev->func[1]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339)) {
                rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
                if (rev >= 2) {
                        val &= ~CID_ID_MASK;
                        val |= BRCM_CC_4339_CHIP_ID;
                }
        }
+
        return val;
 }
 
@@ -3848,6 +3844,11 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
        if (!bus->sdio_core)
                goto fail;
 
+       /* Pick up the CHIPCOMMON core info struct, for bulk IO in bcmsdh.c */
+       sdiodev->cc_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_CHIPCOMMON);
+       if (!sdiodev->cc_core)
+               goto fail;
+
        sdiodev->settings = brcmf_get_module_param(sdiodev->dev,
                                                   BRCMF_BUSTYPE_SDIO,
                                                   bus->ci->chip,
@@ -4017,22 +4018,21 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
                                         const struct firmware *code,
                                         void *nvram, u32 nvram_len)
 {
-       struct brcmf_bus *bus_if;
-       struct brcmf_sdio_dev *sdiodev;
-       struct brcmf_sdio *bus;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+       struct brcmf_sdio *bus = sdiodev->bus;
+       struct brcmf_sdio_dev *sdiod = bus->sdiodev;
+       struct brcmf_core *core = bus->sdio_core;
        u8 saveclk;
 
        brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
-       bus_if = dev_get_drvdata(dev);
-       sdiodev = bus_if->bus_priv.sdio;
+
        if (err)
                goto fail;
 
        if (!bus_if->drvr)
                return;
 
-       bus = sdiodev->bus;
-
        /* try to download image and nvram to the dongle */
        bus->alp_only = true;
        err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@@ -4063,10 +4063,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
        }
 
        /* Enable function 2 (frame transfers) */
-       w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
-                 SD_REG(tosbmailboxdata));
-       err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
+       brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailboxdata),
+                          SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, NULL);
 
+       err = sdio_enable_func(sdiodev->func[2]);
 
        brcmf_dbg(INFO, "enable F2: err=%d\n", err);
 
@@ -4074,12 +4074,14 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
        if (!err) {
                /* Set up the interrupt mask and enable interrupts */
                bus->hostintmask = HOSTINTMASK;
-               w_sdreg32(bus, bus->hostintmask, SD_REG(hostintmask));
+               brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask),
+                                  bus->hostintmask, NULL);
+
 
                brcmf_sdiod_writeb(sdiodev, SBSDIO_WATERMARK, 8, &err);
        } else {
                /* Disable F2 again */
-               sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
+               sdio_disable_func(sdiodev->func[2]);
                goto release;
        }
 
@@ -4215,7 +4217,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
        sdio_claim_host(bus->sdiodev->func[1]);
 
        /* Disable F2 to clear any intermediate frame state on the dongle */
-       sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
+       sdio_disable_func(bus->sdiodev->func[2]);
 
        bus->rxflow = false;
 
index 01def16..04661ec 100644 (file)
@@ -21,9 +21,8 @@
 #include <linux/firmware.h>
 #include "firmware.h"
 
-#define SDIO_FUNC_0            0
-#define SDIO_FUNC_1            1
-#define SDIO_FUNC_2            2
+/* Maximum number of I/O funcs */
+#define NUM_SDIO_FUNCS 3
 
 #define SDIOD_FBR_SIZE         0x100
 
 #define INTR_STATUS_FUNC1      0x2
 #define INTR_STATUS_FUNC2      0x4
 
-/* Maximum number of I/O funcs */
-#define SDIOD_MAX_IOFUNCS      7
-
 /* mask of register map */
 #define REG_F0_REG_MASK                0x7FF
 #define REG_F1_MISC_MASK       0x1FFFF
 
-/* as of sdiod rev 0, supports 3 functions */
-#define SBSDIO_NUM_FUNCTION            3
-
 /* function 0 vendor specific CCCR registers */
 
 #define SDIO_CCCR_BRCM_CARDCAP                 0xf0
 #define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT       BIT(2)
 #define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC       BIT(3)
 
+/* Interrupt enable bits for each function */
+#define SDIO_CCCR_IEN_FUNC0                    BIT(0)
+#define SDIO_CCCR_IEN_FUNC1                    BIT(1)
+#define SDIO_CCCR_IEN_FUNC2                    BIT(2)
+
 #define SDIO_CCCR_BRCM_CARDCTRL                        0xf1
 #define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET      BIT(1)
 
@@ -175,9 +173,10 @@ struct brcmf_sdio;
 struct brcmf_sdiod_freezer;
 
 struct brcmf_sdio_dev {
-       struct sdio_func *func[SDIO_MAX_FUNCS];
+       struct sdio_func *func[NUM_SDIO_FUNCS];
        u8 num_funcs;                   /* Supported funcs on client */
        u32 sbwad;                      /* Save backplane window address */
+       struct brcmf_core *cc_core;     /* chipcommon core info struct */
        struct brcmf_sdio *bus;
        struct device *dev;
        struct brcmf_bus *bus_if;
@@ -296,10 +295,10 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
 /* SDIO device register access interface */
 /* Accessors for SDIO Function 0 */
 #define brcmf_sdiod_func0_rb(sdiodev, addr, r) \
-       sdio_readb((sdiodev)->func[0], (addr), (r))
+       sdio_f0_readb((sdiodev)->func[1], (addr), (r))
 
 #define brcmf_sdiod_func0_wb(sdiodev, addr, v, ret) \
-       sdio_writeb((sdiodev)->func[0], (v), (addr), (ret))
+       sdio_f0_writeb((sdiodev)->func[1], (v), (addr), (ret))
 
 /* Accessors for SDIO Function 1 */
 #define brcmf_sdiod_readb(sdiodev, addr, r) \
@@ -350,7 +349,8 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
                      u8 *data, uint size);
 
 /* Issue an abort to the specified function */
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, u8 fn);
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func);
+
 void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
                              enum brcmf_sdiod_state state);
index 10b075a..7836737 100644 (file)
@@ -684,6 +684,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
        hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
        hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                         IEEE80211_STYPE_NULLFUNC |
+                                        IEEE80211_FCTL_TODS |
                                         (ps ? IEEE80211_FCTL_PM : 0));
        hdr->duration_id = cpu_to_le16(0);
        memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
@@ -727,16 +728,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
            val != PS_MANUAL_POLL)
                return -EINVAL;
 
-       old_ps = data->ps;
-       data->ps = val;
-
-       local_bh_disable();
        if (val == PS_MANUAL_POLL) {
+               if (data->ps != PS_ENABLED)
+                       return -EINVAL;
+               local_bh_disable();
                ieee80211_iterate_active_interfaces_atomic(
                        data->hw, IEEE80211_IFACE_ITER_NORMAL,
                        hwsim_send_ps_poll, data);
-               data->ps_poll_pending = true;
-       } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
+               local_bh_enable();
+               return 0;
+       }
+       old_ps = data->ps;
+       data->ps = val;
+
+       local_bh_disable();
+       if (old_ps == PS_DISABLED && val != PS_DISABLED) {
                ieee80211_iterate_active_interfaces_atomic(
                        data->hw, IEEE80211_IFACE_ITER_NORMAL,
                        hwsim_send_nullfunc_ps, data);
@@ -3215,7 +3221,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
                if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
                        continue;
 
-               skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+               skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
                if (!skb) {
                        res = -ENOMEM;
                        goto out_err;
index dcc529e..8746600 100644 (file)
@@ -290,13 +290,16 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
        adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] =
                        get_unaligned_le16((u8 *)host_cmd + S_DS_GEN);
 
+       /* Set up the timer after transmitting the command, except for
+        * the specific command that does not expect a command response.
+        */
+       if (cmd_code != HostCmd_CMD_FW_DUMP_EVENT)
+               mod_timer(&adapter->cmd_timer,
+                         jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+
        /* Clear BSS_NO_BITS from HostCmd */
        cmd_code &= HostCmd_CMD_ID_MASK;
 
-       /* Setup the timer after transmit command */
-       mod_timer(&adapter->cmd_timer,
-                 jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
-
        return 0;
 }
 
index 6f4239b..db2872d 100644 (file)
@@ -168,10 +168,15 @@ mwifiex_device_dump_read(struct file *file, char __user *ubuf,
 {
        struct mwifiex_private *priv = file->private_data;
 
-       if (!priv->adapter->if_ops.device_dump)
-               return -EIO;
-
-       priv->adapter->if_ops.device_dump(priv->adapter);
+       /* For command timeouts, USB firmware will automatically emit
+        * firmware dump events, so we don't implement device_dump().
+        * For user-initiated dumps, we trigger it ourselves.
+        */
+       if (priv->adapter->iface_type == MWIFIEX_USB)
+               mwifiex_send_cmd(priv, HostCmd_CMD_FW_DUMP_EVENT,
+                                HostCmd_ACT_GEN_SET, 0, NULL, true);
+       else
+               priv->adapter->if_ops.device_dump(priv->adapter);
 
        return 0;
 }
index 13cd58e..9c2cdef 100644 (file)
@@ -56,6 +56,15 @@ struct mwifiex_fw_data {
        u8 data[1];
 } __packed;
 
+struct mwifiex_fw_dump_header {
+       __le16          seq_num;
+       __le16          reserved;
+       __le16          type;
+       __le16          len;
+} __packed;
+
+#define FW_DUMP_INFO_ENDED 0x0002
+
 #define MWIFIEX_FW_DNLD_CMD_1 0x1
 #define MWIFIEX_FW_DNLD_CMD_5 0x5
 #define MWIFIEX_FW_DNLD_CMD_6 0x6
@@ -400,6 +409,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_TDLS_CONFIG                       0x0100
 #define HostCmd_CMD_MC_POLICY                         0x0121
 #define HostCmd_CMD_TDLS_OPER                         0x0122
+#define HostCmd_CMD_FW_DUMP_EVENT                    0x0125
 #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG               0x0223
 #define HostCmd_CMD_CHAN_REGION_CFG                  0x0242
 #define HostCmd_CMD_PACKET_AGGR_CTRL                 0x0251
@@ -570,6 +580,7 @@ enum mwifiex_channel_flags {
 #define EVENT_BG_SCAN_STOPPED           0x00000065
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 #define EVENT_MULTI_CHAN_INFO           0x0000006a
+#define EVENT_FW_DUMP_INFO             0x00000073
 #define EVENT_TX_STATUS_REPORT         0x00000074
 #define EVENT_BT_COEX_WLAN_PARA_CHANGE 0X00000076
 
index e1aa860..d239e92 100644 (file)
@@ -64,6 +64,13 @@ static void wakeup_timer_fn(struct timer_list *t)
                adapter->if_ops.card_reset(adapter);
 }
 
+static void fw_dump_timer_fn(struct timer_list *t)
+{
+       struct mwifiex_adapter *adapter = from_timer(adapter, t, devdump_timer);
+
+       mwifiex_upload_device_dump(adapter);
+}
+
 /*
  * This function initializes the private structure and sets default
  * values to the members.
@@ -314,6 +321,8 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
        adapter->active_scan_triggered = false;
        timer_setup(&adapter->wakeup_timer, wakeup_timer_fn, 0);
+       adapter->devdump_len = 0;
+       timer_setup(&adapter->devdump_timer, fw_dump_timer_fn, 0);
 }
 
 /*
@@ -396,6 +405,7 @@ static void
 mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
 {
        del_timer(&adapter->wakeup_timer);
+       del_timer_sync(&adapter->devdump_timer);
        mwifiex_cancel_all_pending_cmd(adapter);
        wake_up_interruptible(&adapter->cmd_wait_q.wait);
        wake_up_interruptible(&adapter->hs_activate_wait_q);
index a96bd7e..12e7399 100644 (file)
@@ -1051,9 +1051,30 @@ void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter)
 }
 EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync);
 
-int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
 {
-       void *p;
+       /* Dump all the memory data into single file, a userspace script will
+        * be used to split all the memory data to multiple files
+        */
+       mwifiex_dbg(adapter, MSG,
+                   "== mwifiex dump information to /sys/class/devcoredump start\n");
+       dev_coredumpv(adapter->dev, adapter->devdump_data, adapter->devdump_len,
+                     GFP_KERNEL);
+       mwifiex_dbg(adapter, MSG,
+                   "== mwifiex dump information to /sys/class/devcoredump end\n");
+
+       /* Device dump data will be freed in device coredump release function
+        * after 5 min. Here reset adapter->devdump_data and ->devdump_len
+        * to avoid them being accidentally reused.
+        */
+       adapter->devdump_data = NULL;
+       adapter->devdump_len = 0;
+}
+EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
+
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
+{
+       char *p;
        char drv_version[64];
        struct usb_card_rec *cardp;
        struct sdio_mmc_card *sdio_card;
@@ -1061,17 +1082,12 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
        int i, idx;
        struct netdev_queue *txq;
        struct mwifiex_debug_info *debug_info;
-       void *drv_info_dump;
 
        mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");
 
-       /* memory allocate here should be free in mwifiex_upload_device_dump*/
-       drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
-
-       if (!drv_info_dump)
-               return 0;
-
-       p = (char *)(drv_info_dump);
+       p = adapter->devdump_data;
+       strcpy(p, "========Start dump driverinfo========\n");
+       p += strlen("========Start dump driverinfo========\n");
        p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
 
        mwifiex_drv_get_driver_version(adapter, drv_version,
@@ -1155,21 +1171,18 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
                kfree(debug_info);
        }
 
+       strcpy(p, "\n========End dump========\n");
+       p += strlen("\n========End dump========\n");
        mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
-       *drv_info = drv_info_dump;
-       return p - drv_info_dump;
+       adapter->devdump_len = p - (char *)adapter->devdump_data;
 }
 EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump);
 
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
-                               int drv_info_size)
+void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter)
 {
-       u8 idx, *dump_data, *fw_dump_ptr;
-       u32 dump_len;
-
-       dump_len = (strlen("========Start dump driverinfo========\n") +
-                      drv_info_size +
-                      strlen("\n========End dump========\n"));
+       u8 idx;
+       char *fw_dump_ptr;
+       u32 dump_len = 0;
 
        for (idx = 0; idx < adapter->num_mem_types; idx++) {
                struct memory_type_mapping *entry =
@@ -1184,24 +1197,24 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
                }
        }
 
-       dump_data = vzalloc(dump_len + 1);
-       if (!dump_data)
-               goto done;
-
-       fw_dump_ptr = dump_data;
+       if (dump_len + 1 + adapter->devdump_len > MWIFIEX_FW_DUMP_SIZE) {
+               /* Realloc in case buffer overflow */
+               fw_dump_ptr = vzalloc(dump_len + 1 + adapter->devdump_len);
+               mwifiex_dbg(adapter, MSG, "Realloc device dump data.\n");
+               if (!fw_dump_ptr) {
+                       vfree(adapter->devdump_data);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "vzalloc devdump data failure!\n");
+                       return;
+               }
 
-       /* Dump all the memory data into single file, a userspace script will
-        * be used to split all the memory data to multiple files
-        */
-       mwifiex_dbg(adapter, MSG,
-                   "== mwifiex dump information to /sys/class/devcoredump start");
+               memmove(fw_dump_ptr, adapter->devdump_data,
+                       adapter->devdump_len);
+               vfree(adapter->devdump_data);
+               adapter->devdump_data = fw_dump_ptr;
+       }
 
-       strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
-       fw_dump_ptr += strlen("========Start dump driverinfo========\n");
-       memcpy(fw_dump_ptr, drv_info, drv_info_size);
-       fw_dump_ptr += drv_info_size;
-       strcpy(fw_dump_ptr, "\n========End dump========\n");
-       fw_dump_ptr += strlen("\n========End dump========\n");
+       fw_dump_ptr = (char *)adapter->devdump_data + adapter->devdump_len;
 
        for (idx = 0; idx < adapter->num_mem_types; idx++) {
                struct memory_type_mapping *entry =
@@ -1225,14 +1238,8 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
                }
        }
 
-       /* device dump data will be free in device coredump release function
-        * after 5 min
-        */
-       dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL);
-       mwifiex_dbg(adapter, MSG,
-                   "== mwifiex dump information to /sys/class/devcoredump end");
+       adapter->devdump_len = fw_dump_ptr - (char *)adapter->devdump_data;
 
-done:
        for (idx = 0; idx < adapter->num_mem_types; idx++) {
                struct memory_type_mapping *entry =
                        &adapter->mem_type_mapping_tbl[idx];
@@ -1241,10 +1248,8 @@ done:
                entry->mem_ptr = NULL;
                entry->mem_size = 0;
        }
-
-       vfree(drv_info);
 }
-EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
+EXPORT_SYMBOL_GPL(mwifiex_prepare_fw_dump_info);
 
 /*
  * CFG802.11 network device handler for statistics retrieval.
index 154c079..6b5539b 100644 (file)
@@ -94,6 +94,8 @@ enum {
 
 #define MAX_EVENT_SIZE                  2048
 
+#define MWIFIEX_FW_DUMP_SIZE       (2 * 1024 * 1024)
+
 #define ARP_FILTER_MAX_BUF_SIZE         68
 
 #define MWIFIEX_KEY_BUFFER_SIZE                        16
@@ -1032,6 +1034,10 @@ struct mwifiex_adapter {
        bool wake_by_wifi;
        /* Aggregation parameters*/
        struct bus_aggr_params bus_aggr;
+       /* Device dump data/length */
+       void *devdump_data;
+       int devdump_len;
+       struct timer_list devdump_timer;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1656,9 +1662,9 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv,
 u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
                            u8 rx_rate, u8 ht_info);
 
-int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info);
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
-                               int drv_info_size);
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
+void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter);
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
 void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
 int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
@@ -1677,6 +1683,7 @@ void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
 void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter);
 int mwifiex_set_mac_address(struct mwifiex_private *priv,
                            struct net_device *dev);
+void mwifiex_devdump_tmo_func(unsigned long function_context);
 
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
index cd31494..23209c5 100644 (file)
@@ -310,6 +310,8 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
                mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
        }
 
+       cancel_work_sync(&card->work);
+
        mwifiex_remove_card(adapter);
 }
 
@@ -2769,12 +2771,17 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
 
 static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
 {
-       int drv_info_size;
-       void *drv_info;
+       adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+       if (!adapter->devdump_data) {
+               mwifiex_dbg(adapter, ERROR,
+                           "vzalloc devdump data failure!\n");
+               return;
+       }
 
-       drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
+       mwifiex_drv_info_dump(adapter);
        mwifiex_pcie_fw_dump(adapter);
-       mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
+       mwifiex_prepare_fw_dump_info(adapter);
+       mwifiex_upload_device_dump(adapter);
 }
 
 static void mwifiex_pcie_card_reset_work(struct mwifiex_adapter *adapter)
index fd5183c..2488587 100644 (file)
@@ -399,6 +399,8 @@ mwifiex_sdio_remove(struct sdio_func *func)
                mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
        }
 
+       cancel_work_sync(&card->work);
+
        mwifiex_remove_card(adapter);
 }
 
@@ -2505,15 +2507,21 @@ done:
 static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
-       int drv_info_size;
-       void *drv_info;
 
-       drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
+       adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+       if (!adapter->devdump_data) {
+               mwifiex_dbg(adapter, ERROR,
+                           "vzalloc devdump data failure!\n");
+               return;
+       }
+
+       mwifiex_drv_info_dump(adapter);
        if (card->fw_dump_enh)
                mwifiex_sdio_generic_fw_dump(adapter);
        else
                mwifiex_sdio_fw_dump(adapter);
-       mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
+       mwifiex_prepare_fw_dump_info(adapter);
+       mwifiex_upload_device_dump(adapter);
 }
 
 static void mwifiex_sdio_work(struct work_struct *work)
index fb09014..211e47d 100644 (file)
@@ -2206,6 +2206,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
        case HostCmd_CMD_CHAN_REGION_CFG:
                ret = mwifiex_cmd_chan_region_cfg(priv, cmd_ptr, cmd_action);
                break;
+       case HostCmd_CMD_FW_DUMP_EVENT:
+               cmd_ptr->command = cpu_to_le16(cmd_no);
+               cmd_ptr->size = cpu_to_le16(S_DS_GEN);
+               break;
        default:
                mwifiex_dbg(priv->adapter, ERROR,
                            "PREP_CMD: unknown cmd- %#x\n", cmd_no);
index d8db412..93dfb76 100644 (file)
@@ -584,6 +584,62 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
                adapter->coex_rx_win_size);
 }
 
+static void
+mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
+                          struct sk_buff *event_skb)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct mwifiex_fw_dump_header *fw_dump_hdr =
+                               (void *)adapter->event_body;
+
+       if (adapter->iface_type != MWIFIEX_USB) {
+               mwifiex_dbg(adapter, MSG,
+                           "event is not on usb interface, ignore it\n");
+               return;
+       }
+
+       if (!adapter->devdump_data) {
+               /* When receive the first event, allocate device dump
+                * buffer, dump driver info.
+                */
+               adapter->devdump_data = vzalloc(MWIFIEX_FW_DUMP_SIZE);
+               if (!adapter->devdump_data) {
+                       mwifiex_dbg(adapter, ERROR,
+                                   "vzalloc devdump data failure!\n");
+                       return;
+               }
+
+               mwifiex_drv_info_dump(adapter);
+
+               /* If no subsequent event arrives within 10s, upload the
+                * device dump data collected so far. This is useful if
+                * the end-of-transmission event gets lost; in this
+                * corner case the user still gets a partial dump.
+                */
+               mod_timer(&adapter->devdump_timer,
+                         jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+       }
+
+       /* Overflow check */
+       if (adapter->devdump_len + event_skb->len >= MWIFIEX_FW_DUMP_SIZE)
+               goto upload_dump;
+
+       memmove(adapter->devdump_data + adapter->devdump_len,
+               event_skb->data, event_skb->len);
+       adapter->devdump_len += event_skb->len;
+
+       if (le16_to_cpu(fw_dump_hdr->type) == FW_DUMP_INFO_ENDED) {
+               mwifiex_dbg(adapter, MSG,
+                           "receive end of transmission flag event!\n");
+               goto upload_dump;
+       }
+       return;
+
+upload_dump:
+       del_timer_sync(&adapter->devdump_timer);
+       mwifiex_upload_device_dump(adapter);
+}
+
 /*
  * This function handles events generated by firmware.
  *
@@ -636,6 +692,7 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
  *      - EVENT_DELBA
  *      - EVENT_BA_STREAM_TIEMOUT
  *      - EVENT_AMSDU_AGGR_CTRL
+ *      - EVENT_FW_DUMP_INFO
  */
 int mwifiex_process_sta_event(struct mwifiex_private *priv)
 {
@@ -1007,6 +1064,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                            adapter->event_skb->len -
                                            sizeof(eventcause));
                break;
+       case EVENT_FW_DUMP_INFO:
+               mwifiex_dbg(adapter, EVENT, "event: firmware debug info\n");
+               mwifiex_fw_dump_info_event(priv, adapter->event_skb);
+               break;
        /* Debugging event; not used, but let's not print an ERROR for it. */
        case EVENT_UNKNOWN_DEBUG:
                mwifiex_dbg(adapter, EVENT, "event: debug\n");
index 7c3612a..c121b50 100644 (file)
@@ -33,7 +33,8 @@ mt76_reg_get(void *data, u64 *val)
        return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set,
+                        "0x%08llx\n");
 
 static int
 mt76_queues_read(struct seq_file *s, void *data)
@@ -65,8 +66,8 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
 
        debugfs_create_u8("led_pin", S_IRUSR | S_IWUSR, dir, &dev->led_pin);
        debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
-       debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
-                           &fops_regval);
+       debugfs_create_file_unsafe("regval", S_IRUSR | S_IWUSR, dir, dev,
+                                  &fops_regval);
        debugfs_create_blob("eeprom", S_IRUSR, dir, &dev->eeprom);
        if (dev->otp.data)
                debugfs_create_blob("otp", S_IRUSR, dir, &dev->otp);
index 440b7e7..9c9bf3e 100644 (file)
@@ -425,12 +425,13 @@ mt76x2_rate_power_val(u8 val)
        return mt76x2_sign_extend_optional(val, 7);
 }
 
-void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t)
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
+                          struct ieee80211_channel *chan)
 {
        bool is_5ghz;
        u16 val;
 
-       is_5ghz = dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ;
+       is_5ghz = chan->band == NL80211_BAND_5GHZ;
 
        memset(t, 0, sizeof(*t));
 
@@ -482,11 +483,22 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t)
        t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
 }
 
+int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
+{
+       int i;
+       s8 ret = 0;
+
+       for (i = 0; i < sizeof(r->all); i++)
+               ret = max(ret, r->all[i]);
+
+       return ret;
+}
+
 static void
 mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
-                      int chain, int offset)
+                        struct ieee80211_channel *chan, int chain, int offset)
 {
-       int channel = dev->mt76.chandef.chan->hw_value;
+       int channel = chan->hw_value;
        int delta_idx;
        u8 data[6];
        u16 val;
@@ -511,9 +523,9 @@ mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
 
 static void
 mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
-                      int chain, int offset)
+                        struct ieee80211_channel *chan, int chain, int offset)
 {
-       int channel = dev->mt76.chandef.chan->hw_value;
+       int channel = chan->hw_value;
        enum mt76x2_cal_channel_group group;
        int delta_idx;
        u16 val;
@@ -524,7 +536,7 @@ mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
 
        if (channel >= 192)
                delta_idx = 4;
-       else if (channel >= 484)
+       else if (channel >= 184)
                delta_idx = 3;
        else if (channel < 44)
                delta_idx = 3;
@@ -559,7 +571,8 @@ mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
 }
 
 void mt76x2_get_power_info(struct mt76x2_dev *dev,
-                          struct mt76x2_tx_power_info *t)
+                          struct mt76x2_tx_power_info *t,
+                          struct ieee80211_channel *chan)
 {
        u16 bw40, bw80;
 
@@ -568,13 +581,17 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
        bw40 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
        bw80 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
 
-       if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) {
+       if (chan->band == NL80211_BAND_5GHZ) {
                bw40 >>= 8;
-               mt76x2_get_power_info_5g(dev, t, 0, MT_EE_TX_POWER_0_START_5G);
-               mt76x2_get_power_info_5g(dev, t, 1, MT_EE_TX_POWER_1_START_5G);
+               mt76x2_get_power_info_5g(dev, t, chan, 0,
+                                        MT_EE_TX_POWER_0_START_5G);
+               mt76x2_get_power_info_5g(dev, t, chan, 1,
+                                        MT_EE_TX_POWER_1_START_5G);
        } else {
-               mt76x2_get_power_info_2g(dev, t, 0, MT_EE_TX_POWER_0_START_2G);
-               mt76x2_get_power_info_2g(dev, t, 1, MT_EE_TX_POWER_1_START_2G);
+               mt76x2_get_power_info_2g(dev, t, chan, 0,
+                                        MT_EE_TX_POWER_0_START_2G);
+               mt76x2_get_power_info_2g(dev, t, chan, 1,
+                                        MT_EE_TX_POWER_1_START_2G);
        }
 
        if (mt76x2_tssi_enabled(dev) || !field_valid(t->target_power))
index 063d6c8..d791227 100644 (file)
@@ -146,9 +146,12 @@ mt76x2_eeprom_get(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field)
        return get_unaligned_le16(dev->mt76.eeprom.data + field);
 }
 
-void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t);
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
+                          struct ieee80211_channel *chan);
+int mt76x2_get_max_rate_power(struct mt76_rate_power *r);
 void mt76x2_get_power_info(struct mt76x2_dev *dev,
-                          struct mt76x2_tx_power_info *t);
+                          struct mt76x2_tx_power_info *t,
+                          struct ieee80211_channel *chan);
 int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
 bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
 void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
index d3f03a8..4373a2b 100644 (file)
@@ -308,8 +308,6 @@ int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
        for (i = 0; i < 16; i++)
                mt76_rr(dev, MT_TX_STAT_FIFO);
 
-       mt76_set(dev, MT_MAC_APC_BSSID_H(0), MT_MAC_APC_BSSID0_H_EN);
-
        mt76_wr(dev, MT_CH_TIME_CFG,
                MT_CH_TIME_CFG_TIMER_EN |
                MT_CH_TIME_CFG_TX_AS_BUSY |
@@ -586,6 +584,8 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
        if (ret)
                return ret;
 
+       dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
        ret = mt76x2_dma_init(dev);
        if (ret)
                return ret;
@@ -600,7 +600,6 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
                return ret;
 
        mt76x2_mac_stop(dev, false);
-       dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
 
        return 0;
 }
@@ -760,6 +759,37 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
                mt76x2_led_set_config(mt76, 0xff, 0);
 }
 
+static void
+mt76x2_init_txpower(struct mt76x2_dev *dev,
+                   struct ieee80211_supported_band *sband)
+{
+       struct ieee80211_channel *chan;
+       struct mt76x2_tx_power_info txp;
+       struct mt76_rate_power t = {};
+       int target_power;
+       int i;
+
+       for (i = 0; i < sband->n_channels; i++) {
+               chan = &sband->channels[i];
+
+               mt76x2_get_power_info(dev, &txp, chan);
+
+               target_power = max_t(int, (txp.chain[0].target_power +
+                                          txp.chain[0].delta),
+                                         (txp.chain[1].target_power +
+                                          txp.chain[1].delta));
+
+               mt76x2_get_rate_power(dev, &t, chan);
+
+               chan->max_power = mt76x2_get_max_rate_power(&t) +
+                                 target_power;
+               chan->max_power /= 2;
+
+               /* convert to combined output power on 2x2 devices */
+               chan->max_power += 3;
+       }
+}
+
 int mt76x2_register_device(struct mt76x2_dev *dev)
 {
        struct ieee80211_hw *hw = mt76_hw(dev);
@@ -828,6 +858,8 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
                goto fail;
 
        mt76x2_init_debugfs(dev);
+       mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
+       mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
 
        return 0;
 
index 39fc1d7..ecc23f5 100644 (file)
@@ -28,7 +28,7 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
                       get_unaligned_le16(addr + 4));
 }
 
-static void
+static int
 mt76x2_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
 {
        u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
@@ -42,7 +42,7 @@ mt76x2_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
                        idx += 4;
 
                status->rate_idx = idx;
-               return;
+               return 0;
        case MT_PHY_TYPE_CCK:
                if (idx >= 8) {
                        idx -= 8;
@@ -53,7 +53,7 @@ mt76x2_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
                        idx = 0;
 
                status->rate_idx = idx;
-               return;
+               return 0;
        case MT_PHY_TYPE_HT_GF:
                status->enc_flags |= RX_ENC_FLAG_HT_GF;
                /* fall through */
@@ -67,8 +67,7 @@ mt76x2_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
                status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
                break;
        default:
-               WARN_ON(1);
-               return;
+               return -EINVAL;
        }
 
        if (rate & MT_RXWI_RATE_LDPC)
@@ -92,6 +91,8 @@ mt76x2_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
        default:
                break;
        }
+
+       return 0;
 }
 
 static __le16
@@ -272,12 +273,10 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
        status->freq = dev->mt76.chandef.chan->center_freq;
        status->band = dev->mt76.chandef.chan->band;
 
-       mt76x2_mac_process_rate(status, rate);
-
-       return 0;
+       return mt76x2_mac_process_rate(status, rate);
 }
 
-static void
+static int
 mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
                           enum nl80211_band band)
 {
@@ -293,13 +292,13 @@ mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
                        idx += 4;
 
                txrate->idx = idx;
-               return;
+               return 0;
        case MT_PHY_TYPE_CCK:
                if (idx >= 8)
                        idx -= 8;
 
                txrate->idx = idx;
-               return;
+               return 0;
        case MT_PHY_TYPE_HT_GF:
                txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
                /* fall through */
@@ -312,8 +311,7 @@ mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
                txrate->idx = idx;
                break;
        default:
-               WARN_ON(1);
-               return;
+               return -EINVAL;
        }
 
        switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
@@ -326,12 +324,14 @@ mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
                txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
                break;
        default:
-               WARN_ON(1);
+               return -EINVAL;
                break;
        }
 
        if (rate & MT_RXWI_RATE_SGI)
                txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+       return 0;
 }
 
 static void
index 2cef48e..963aea9 100644 (file)
@@ -83,7 +83,6 @@ mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        struct mt76x2_dev *dev = hw->priv;
        struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
        unsigned int idx = 0;
-       int ret = 0;
 
        if (vif->addr[0] & BIT(1))
                idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
@@ -109,7 +108,7 @@ mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        mvif->group_wcid.hw_key_idx = -1;
        mt76x2_txq_init(dev, vif->txq);
 
-       return ret;
+       return 0;
 }
 
 static void
@@ -153,9 +152,21 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
 
        mutex_lock(&dev->mutex);
 
+       if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+               if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+                       dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
+               else
+                       dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
+
+               mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+       }
+
        if (changed & IEEE80211_CONF_CHANGE_POWER) {
                dev->txpower_conf = hw->conf.power_level * 2;
 
+               /* convert to per-chain power for 2x2 devices */
+               dev->txpower_conf -= 6;
+
                if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
                        mt76x2_phy_set_txpower(dev);
                        mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf);
@@ -438,6 +449,10 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
        struct mt76x2_dev *dev = hw->priv;
 
        *dbm = dev->txpower_cur / 2;
+
+       /* convert from per-chain power to combined output on 2x2 devices */
+       *dbm += 3;
+
        return 0;
 }
 
index d45737e..15820b1 100644 (file)
@@ -45,6 +45,8 @@ static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
        struct sk_buff *skb;
 
        skb = alloc_skb(len, GFP_KERNEL);
+       if (!skb)
+               return NULL;
        memcpy(skb_put(skb, len), data, len);
 
        return skb;
index 1264971..5b74274 100644 (file)
@@ -102,26 +102,15 @@ mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
                        r->all[i] = limit;
 }
 
-static int
-mt76x2_get_max_power(struct mt76_rate_power *r)
-{
-       int i;
-       s8 ret = 0;
-
-       for (i = 0; i < sizeof(r->all); i++)
-               ret = max(ret, r->all[i]);
-
-       return ret;
-}
-
 void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
 {
        enum nl80211_chan_width width = dev->mt76.chandef.width;
+       struct ieee80211_channel *chan = dev->mt76.chandef.chan;
        struct mt76x2_tx_power_info txp;
        int txp_0, txp_1, delta = 0;
        struct mt76_rate_power t = {};
 
-       mt76x2_get_power_info(dev, &txp);
+       mt76x2_get_power_info(dev, &txp, chan);
 
        if (width == NL80211_CHAN_WIDTH_40)
                delta = txp.delta_bw40;
@@ -131,11 +120,11 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
        if (txp.target_power > dev->txpower_conf)
                delta -= txp.target_power - dev->txpower_conf;
 
-       mt76x2_get_rate_power(dev, &t);
+       mt76x2_get_rate_power(dev, &t, chan);
        mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power +
                                   txp.chain[0].delta);
        mt76x2_limit_rate_power(&t, dev->txpower_conf);
-       dev->txpower_cur = mt76x2_get_max_power(&t);
+       dev->txpower_cur = mt76x2_get_max_rate_power(&t);
        mt76x2_add_rate_power_offset(&t, -(txp.chain[0].target_power +
                                         txp.chain[0].delta + delta));
        dev->target_power = txp.chain[0].target_power;
@@ -325,8 +314,7 @@ mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
        mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
        mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
 
-       mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_CCK_SIFS,
-                      13 + (bw ? 1 : 0));
+       mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
 }
 
 static void
@@ -559,7 +547,6 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
        u8 bw, bw_index;
        int freq, freq1;
        int ret;
-       u8 sifs = 13;
 
        dev->cal.channel_cal_done = false;
        freq = chandef->chan->center_freq;
@@ -611,11 +598,6 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
                  MT_EXT_CCA_CFG_CCA_MASK),
                 ext_cca_chan[ch_group_index]);
 
-       if (chandef->width >= NL80211_CHAN_WIDTH_40)
-               sifs++;
-
-       mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, sifs);
-
        ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
        if (ret)
                return ret;
@@ -682,7 +664,7 @@ mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev)
                        return;
 
                dev->cal.tssi_comp_pending = false;
-               mt76x2_get_power_info(dev, &txp);
+               mt76x2_get_power_info(dev, &txp, chan);
 
                if (mt76x2_ext_pa_enabled(dev, chan->band))
                        t.pa_mode = 1;
index 6711e7f..abf1099 100644 (file)
@@ -190,7 +190,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy,
                goto err_mac;
        }
 
-       if (qtnf_core_net_attach(mac, vif, name, name_assign_t, type)) {
+       if (qtnf_core_net_attach(mac, vif, name, name_assign_t)) {
                pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid,
                       vif->vifid);
                goto err_net;
@@ -381,6 +381,7 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf;
        u32 short_cookie = prandom_u32();
        u16 flags = 0;
+       u16 freq;
 
        *cookie = short_cookie;
 
@@ -393,13 +394,21 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        if (params->dont_wait_for_ack)
                flags |= QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT;
 
+       /* If channel is not specified, pass "freq = 0" to tell device
+        * firmware to use current channel.
+        */
+       if (params->chan)
+               freq = params->chan->center_freq;
+       else
+               freq = 0;
+
        pr_debug("%s freq:%u; FC:%.4X; DA:%pM; len:%zu; C:%.8X; FL:%.4X\n",
-                wdev->netdev->name, params->chan->center_freq,
+                wdev->netdev->name, freq,
                 le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da,
                 params->len, short_cookie, flags);
 
        return qtnf_cmd_send_mgmt_frame(vif, short_cookie, flags,
-                                       params->chan->center_freq,
+                                       freq,
                                        params->buf, params->len);
 }
 
@@ -409,6 +418,7 @@ qtnf_get_station(struct wiphy *wiphy, struct net_device *dev,
 {
        struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
 
+       sinfo->generation = vif->generation;
        return qtnf_cmd_get_sta_info(vif, mac, sinfo);
 }
 
@@ -430,11 +440,13 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
        ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo);
 
        if (unlikely(ret == -ENOENT)) {
-               qtnf_sta_list_del(&vif->sta_list, mac);
+               qtnf_sta_list_del(vif, mac);
                cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
                sinfo->filled = 0;
        }
 
+       sinfo->generation = vif->generation;
+
        return ret;
 }
 
@@ -717,7 +729,8 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
        }
 
        if (!cfg80211_chandef_valid(chandef)) {
-               pr_err("%s: bad chan freq1=%u freq2=%u bw=%u\n", ndev->name,
+               pr_err("%s: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
+                      ndev->name, chandef->chan->center_freq,
                       chandef->center_freq1, chandef->center_freq2,
                       chandef->width);
                ret = -ENODATA;
@@ -750,6 +763,35 @@ static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev,
        return ret;
 }
 
+static int qtnf_start_radar_detection(struct wiphy *wiphy,
+                                     struct net_device *ndev,
+                                     struct cfg80211_chan_def *chandef,
+                                     u32 cac_time_ms)
+{
+       struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+       int ret;
+
+       ret = qtnf_cmd_start_cac(vif, chandef, cac_time_ms);
+       if (ret)
+               pr_err("%s: failed to start CAC ret=%d\n", ndev->name, ret);
+
+       return ret;
+}
+
+static int qtnf_set_mac_acl(struct wiphy *wiphy,
+                           struct net_device *dev,
+                           const struct cfg80211_acl_data *params)
+{
+       struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
+       int ret;
+
+       ret = qtnf_cmd_set_mac_acl(vif, params);
+       if (ret)
+               pr_err("%s: failed to set mac ACL ret=%d\n", dev->name, ret);
+
+       return ret;
+}
+
 static struct cfg80211_ops qtn_cfg80211_ops = {
        .add_virtual_intf       = qtnf_add_virtual_intf,
        .change_virtual_intf    = qtnf_change_virtual_intf,
@@ -773,7 +815,9 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
        .disconnect             = qtnf_disconnect,
        .dump_survey            = qtnf_dump_survey,
        .get_channel            = qtnf_get_channel,
-       .channel_switch         = qtnf_channel_switch
+       .channel_switch         = qtnf_channel_switch,
+       .start_radar_detection  = qtnf_start_radar_detection,
+       .set_mac_acl            = qtnf_set_mac_acl,
 };
 
 static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
@@ -802,6 +846,9 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
                        continue;
 
                mac = bus->mac[mac_idx];
+               if (!mac)
+                       continue;
+
                wiphy = priv_to_wiphy(mac);
 
                for (band = 0; band < NUM_NL80211_BANDS; ++band) {
@@ -886,6 +933,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
        wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
        wiphy->mgmt_stypes = qtnf_mgmt_stypes;
        wiphy->max_remain_on_channel_duration = 5000;
+       wiphy->max_acl_mac_addrs = mac->macinfo.max_acl_mac_addrs;
 
        wiphy->iface_combinations = iface_comb;
        wiphy->n_iface_combinations = 1;
index 8bc8dd6..6ffe483 100644 (file)
@@ -162,6 +162,14 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type,
                memcpy(tlv->ie_data, buf, len);
 }
 
+static inline size_t qtnf_cmd_acl_data_size(const struct cfg80211_acl_data *acl)
+{
+       size_t size = sizeof(struct qlink_acl_data) +
+                     acl->n_acl_entries * sizeof(struct qlink_mac_address);
+
+       return size;
+}
+
 static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
                                      const struct cfg80211_ap_settings *s)
 {
@@ -178,6 +186,9 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
        if (cfg80211_chandef_valid(&s->chandef))
                len += sizeof(struct qlink_tlv_chandef);
 
+       if (s->acl)
+               len += qtnf_cmd_acl_data_size(s->acl);
+
        if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) {
                pr_err("VIF%u.%u: can not fit AP settings: %u\n",
                       vif->mac->macid, vif->vifid, len);
@@ -247,7 +258,7 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
                chtlv->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANDEF);
                chtlv->hdr.len = cpu_to_le16(sizeof(*chtlv) -
                                             sizeof(chtlv->hdr));
-               qlink_chandef_cfg2q(&s->chandef, &chtlv->chan);
+               qlink_chandef_cfg2q(&s->chandef, &chtlv->chdef);
        }
 
        qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_BEACON_HEAD,
@@ -283,6 +294,16 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
                memcpy(tlv->val, s->vht_cap, sizeof(*s->vht_cap));
        }
 
+       if (s->acl) {
+               size_t acl_size = qtnf_cmd_acl_data_size(s->acl);
+               struct qlink_tlv_hdr *tlv =
+                       skb_put(cmd_skb, sizeof(*tlv) + acl_size);
+
+               tlv->type = cpu_to_le16(QTN_TLV_ID_ACL_DATA);
+               tlv->len = cpu_to_le16(acl_size);
+               qlink_acl_data_cfg2q(s->acl, (struct qlink_acl_data *)tlv->val);
+       }
+
        qtnf_bus_lock(vif->mac->bus);
 
        ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
@@ -461,30 +482,8 @@ out:
 }
 
 static void
-qtnf_sta_info_parse_basic_counters(struct station_info *sinfo,
-               const struct qlink_sta_stat_basic_counters *counters)
-{
-       sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES) |
-                        BIT(NL80211_STA_INFO_TX_BYTES);
-       sinfo->rx_bytes = get_unaligned_le64(&counters->rx_bytes);
-       sinfo->tx_bytes = get_unaligned_le64(&counters->tx_bytes);
-
-       sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS) |
-                        BIT(NL80211_STA_INFO_TX_PACKETS) |
-                        BIT(NL80211_STA_INFO_BEACON_RX);
-       sinfo->rx_packets = get_unaligned_le32(&counters->rx_packets);
-       sinfo->tx_packets = get_unaligned_le32(&counters->tx_packets);
-       sinfo->rx_beacon = get_unaligned_le64(&counters->rx_beacons);
-
-       sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC) |
-                        BIT(NL80211_STA_INFO_TX_FAILED);
-       sinfo->rx_dropped_misc = get_unaligned_le32(&counters->rx_dropped);
-       sinfo->tx_failed = get_unaligned_le32(&counters->tx_failed);
-}
-
-static void
 qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
-                        const struct  qlink_sta_info_rate *rate_src)
+                        const struct qlink_sta_info_rate *rate_src)
 {
        rate_dst->legacy = get_unaligned_le16(&rate_src->rate) * 10;
 
@@ -493,22 +492,23 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
        rate_dst->flags = 0;
 
        switch (rate_src->bw) {
-       case QLINK_STA_INFO_RATE_BW_5:
+       case QLINK_CHAN_WIDTH_5:
                rate_dst->bw = RATE_INFO_BW_5;
                break;
-       case QLINK_STA_INFO_RATE_BW_10:
+       case QLINK_CHAN_WIDTH_10:
                rate_dst->bw = RATE_INFO_BW_10;
                break;
-       case QLINK_STA_INFO_RATE_BW_20:
+       case QLINK_CHAN_WIDTH_20:
+       case QLINK_CHAN_WIDTH_20_NOHT:
                rate_dst->bw = RATE_INFO_BW_20;
                break;
-       case QLINK_STA_INFO_RATE_BW_40:
+       case QLINK_CHAN_WIDTH_40:
                rate_dst->bw = RATE_INFO_BW_40;
                break;
-       case QLINK_STA_INFO_RATE_BW_80:
+       case QLINK_CHAN_WIDTH_80:
                rate_dst->bw = RATE_INFO_BW_80;
                break;
-       case QLINK_STA_INFO_RATE_BW_160:
+       case QLINK_CHAN_WIDTH_160:
                rate_dst->bw = RATE_INFO_BW_160;
                break;
        default:
@@ -578,87 +578,125 @@ qtnf_sta_info_parse_flags(struct nl80211_sta_flag_update *dst,
 }
 
 static void
-qtnf_sta_info_parse_generic_info(struct station_info *sinfo,
-                                const struct qlink_sta_info_generic *info)
+qtnf_cmd_sta_info_parse(struct station_info *sinfo,
+                       const struct qlink_tlv_hdr *tlv,
+                       size_t resp_size)
 {
-       sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME) |
-                        BIT(NL80211_STA_INFO_INACTIVE_TIME);
-       sinfo->connected_time = get_unaligned_le32(&info->connected_time);
-       sinfo->inactive_time = get_unaligned_le32(&info->inactive_time);
+       const struct qlink_sta_stats *stats = NULL;
+       const u8 *map = NULL;
+       unsigned int map_len = 0;
+       unsigned int stats_len = 0;
+       u16 tlv_len;
+
+#define qtnf_sta_stat_avail(stat_name, bitn)   \
+       (qtnf_utils_is_bit_set(map, bitn, map_len) && \
+        (offsetofend(struct qlink_sta_stats, stat_name) <= stats_len))
+
+       while (resp_size >= sizeof(*tlv)) {
+               tlv_len = le16_to_cpu(tlv->len);
+
+               switch (le16_to_cpu(tlv->type)) {
+               case QTN_TLV_ID_STA_STATS_MAP:
+                       map_len = tlv_len;
+                       map = tlv->val;
+                       break;
+               case QTN_TLV_ID_STA_STATS:
+                       stats_len = tlv_len;
+                       stats = (const struct qlink_sta_stats *)tlv->val;
+                       break;
+               default:
+                       break;
+               }
 
-       sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL) |
-                        BIT(NL80211_STA_INFO_SIGNAL_AVG);
-       sinfo->signal = info->rssi - 120;
-       sinfo->signal_avg = info->rssi_avg - QLINK_RSSI_OFFSET;
+               resp_size -= tlv_len + sizeof(*tlv);
+               tlv = (const struct qlink_tlv_hdr *)(tlv->val + tlv_len);
+       }
+
+       if (!map || !stats)
+               return;
+
+       if (qtnf_sta_stat_avail(inactive_time, QLINK_STA_INFO_INACTIVE_TIME)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_INACTIVE_TIME);
+               sinfo->inactive_time = le32_to_cpu(stats->inactive_time);
+       }
+
+       if (qtnf_sta_stat_avail(connected_time,
+                               QLINK_STA_INFO_CONNECTED_TIME)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_CONNECTED_TIME);
+               sinfo->connected_time = le32_to_cpu(stats->connected_time);
+       }
+
+       if (qtnf_sta_stat_avail(signal, QLINK_STA_INFO_SIGNAL)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+               sinfo->signal = stats->signal - QLINK_RSSI_OFFSET;
+       }
 
-       if (info->rx_rate.rate) {
+       if (qtnf_sta_stat_avail(signal_avg, QLINK_STA_INFO_SIGNAL_AVG)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+               sinfo->signal_avg = stats->signal_avg - QLINK_RSSI_OFFSET;
+       }
+
+       if (qtnf_sta_stat_avail(rxrate, QLINK_STA_INFO_RX_BITRATE)) {
                sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
-               qtnf_sta_info_parse_rate(&sinfo->rxrate, &info->rx_rate);
+               qtnf_sta_info_parse_rate(&sinfo->rxrate, &stats->rxrate);
        }
 
-       if (info->tx_rate.rate) {
+       if (qtnf_sta_stat_avail(txrate, QLINK_STA_INFO_TX_BITRATE)) {
                sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
-               qtnf_sta_info_parse_rate(&sinfo->txrate, &info->tx_rate);
+               qtnf_sta_info_parse_rate(&sinfo->txrate, &stats->txrate);
        }
 
-       sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
-       qtnf_sta_info_parse_flags(&sinfo->sta_flags, &info->state);
-}
+       if (qtnf_sta_stat_avail(sta_flags, QLINK_STA_INFO_STA_FLAGS)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_STA_FLAGS);
+               qtnf_sta_info_parse_flags(&sinfo->sta_flags, &stats->sta_flags);
+       }
 
-static int qtnf_cmd_sta_info_parse(struct station_info *sinfo,
-                                  const u8 *payload, size_t payload_size)
-{
-       const struct qlink_sta_stat_basic_counters *counters;
-       const struct qlink_sta_info_generic *sta_info;
-       u16 tlv_type;
-       u16 tlv_value_len;
-       size_t tlv_full_len;
-       const struct qlink_tlv_hdr *tlv;
+       if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
+               sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
+       }
 
-       sinfo->filled = 0;
+       if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
+               sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
+       }
 
-       tlv = (const struct qlink_tlv_hdr *)payload;
-       while (payload_size >= sizeof(struct qlink_tlv_hdr)) {
-               tlv_type = le16_to_cpu(tlv->type);
-               tlv_value_len = le16_to_cpu(tlv->len);
-               tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
-               if (tlv_full_len > payload_size) {
-                       pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
-                               tlv_type, tlv_value_len);
-                       return -EINVAL;
-               }
-               switch (tlv_type) {
-               case QTN_TLV_ID_STA_BASIC_COUNTERS:
-                       if (unlikely(tlv_value_len < sizeof(*counters))) {
-                               pr_err("invalid TLV size %.4X: %u\n",
-                                      tlv_type, tlv_value_len);
-                               break;
-                       }
+       if (qtnf_sta_stat_avail(rx_bytes, QLINK_STA_INFO_RX_BYTES64)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
+               sinfo->rx_bytes = le64_to_cpu(stats->rx_bytes);
+       }
 
-                       counters = (void *)tlv->val;
-                       qtnf_sta_info_parse_basic_counters(sinfo, counters);
-                       break;
-               case QTN_TLV_ID_STA_GENERIC_INFO:
-                       if (unlikely(tlv_value_len < sizeof(*sta_info)))
-                               break;
+       if (qtnf_sta_stat_avail(tx_bytes, QLINK_STA_INFO_TX_BYTES64)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
+               sinfo->tx_bytes = le64_to_cpu(stats->tx_bytes);
+       }
 
-                       sta_info = (void *)tlv->val;
-                       qtnf_sta_info_parse_generic_info(sinfo, sta_info);
-                       break;
-               default:
-                       pr_warn("unexpected TLV type: %.4X\n", tlv_type);
-                       break;
-               }
-               payload_size -= tlv_full_len;
-               tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
+       if (qtnf_sta_stat_avail(rx_packets, QLINK_STA_INFO_RX_PACKETS)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+               sinfo->rx_packets = le32_to_cpu(stats->rx_packets);
        }
 
-       if (payload_size) {
-               pr_warn("malformed TLV buf; bytes left: %zu\n", payload_size);
-               return -EINVAL;
+       if (qtnf_sta_stat_avail(tx_packets, QLINK_STA_INFO_TX_PACKETS)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+               sinfo->tx_packets = le32_to_cpu(stats->tx_packets);
        }
 
-       return 0;
+       if (qtnf_sta_stat_avail(rx_beacon, QLINK_STA_INFO_BEACON_RX)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
+               sinfo->rx_beacon = le64_to_cpu(stats->rx_beacon);
+       }
+
+       if (qtnf_sta_stat_avail(rx_dropped_misc, QLINK_STA_INFO_RX_DROP_MISC)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_RX_DROP_MISC);
+               sinfo->rx_dropped_misc = le32_to_cpu(stats->rx_dropped_misc);
+       }
+
+       if (qtnf_sta_stat_avail(tx_failed, QLINK_STA_INFO_TX_FAILED)) {
+               sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
+               sinfo->tx_failed = le32_to_cpu(stats->tx_failed);
+       }
+
+#undef qtnf_sta_stat_avail
 }
 
 int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
@@ -715,7 +753,9 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
                goto out;
        }
 
-       ret = qtnf_cmd_sta_info_parse(sinfo, resp->info, var_resp_len);
+       qtnf_cmd_sta_info_parse(sinfo,
+                               (const struct qlink_tlv_hdr *)resp->info,
+                               var_resp_len);
 
 out:
        qtnf_bus_unlock(vif->mac->bus);
@@ -1029,6 +1069,10 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
        struct qlink_iface_comb_num *comb;
        size_t tlv_full_len;
        const struct qlink_tlv_hdr *tlv;
+       u8 *ext_capa = NULL;
+       u8 *ext_capa_mask = NULL;
+       u8 ext_capa_len = 0;
+       u8 ext_capa_mask_len = 0;
 
        mac->macinfo.n_limits = 0;
 
@@ -1092,6 +1136,18 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
                        if (limits[rec].types)
                                rec++;
                        break;
+               case WLAN_EID_EXT_CAPABILITY:
+                       if (unlikely(tlv_value_len > U8_MAX))
+                               return -EINVAL;
+                       ext_capa = (u8 *)tlv->val;
+                       ext_capa_len = tlv_value_len;
+                       break;
+               case QTN_TLV_ID_EXT_CAPABILITY_MASK:
+                       if (unlikely(tlv_value_len > U8_MAX))
+                               return -EINVAL;
+                       ext_capa_mask = (u8 *)tlv->val;
+                       ext_capa_mask_len = tlv_value_len;
+                       break;
                default:
                        break;
                }
@@ -1112,6 +1168,34 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
                return -EINVAL;
        }
 
+       if (ext_capa_len != ext_capa_mask_len) {
+               pr_err("MAC%u: ext_capa/_mask lengths mismatch: %u != %u\n",
+                      mac->macid, ext_capa_len, ext_capa_mask_len);
+               return -EINVAL;
+       }
+
+       if (ext_capa_len > 0) {
+               ext_capa = kmemdup(ext_capa, ext_capa_len, GFP_KERNEL);
+               if (!ext_capa)
+                       return -ENOMEM;
+
+               ext_capa_mask =
+                       kmemdup(ext_capa_mask, ext_capa_mask_len, GFP_KERNEL);
+               if (!ext_capa_mask) {
+                       kfree(ext_capa);
+                       return -ENOMEM;
+               }
+       } else {
+               ext_capa = NULL;
+               ext_capa_mask = NULL;
+       }
+
+       kfree(mac->macinfo.extended_capabilities);
+       kfree(mac->macinfo.extended_capabilities_mask);
+       mac->macinfo.extended_capabilities = ext_capa;
+       mac->macinfo.extended_capabilities_mask = ext_capa_mask;
+       mac->macinfo.extended_capabilities_len = ext_capa_len;
+
        return 0;
 }
 
@@ -1143,6 +1227,7 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
        mac_info->radar_detect_widths =
                        qlink_chan_width_mask_to_nl(le16_to_cpu(
                                        resp_info->radar_detect_widths));
+       mac_info->max_acl_mac_addrs = le32_to_cpu(resp_info->max_acl_mac_addrs);
 
        memcpy(&mac_info->ht_cap_mod_mask, &resp_info->ht_cap_mod_mask,
               sizeof(mac_info->ht_cap_mod_mask));
@@ -1186,7 +1271,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
        size_t tlv_len;
        size_t tlv_dlen;
        const struct qlink_tlv_hdr *tlv;
-       const struct qlink_tlv_channel *qchan;
+       const struct qlink_channel *qchan;
        struct ieee80211_channel *chan;
        unsigned int chidx = 0;
        u32 qflags;
@@ -1232,7 +1317,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
 
                switch (tlv_type) {
                case QTN_TLV_ID_CHANNEL:
-                       if (unlikely(tlv_len != sizeof(*qchan))) {
+                       if (unlikely(tlv_dlen != sizeof(*qchan))) {
                                pr_err("invalid channel TLV len %zu\n",
                                       tlv_len);
                                goto error_ret;
@@ -1243,7 +1328,7 @@ qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
                                goto error_ret;
                        }
 
-                       qchan = (const struct qlink_tlv_channel *)tlv;
+                       qchan = (const struct qlink_channel *)tlv->val;
                        chan = &band->channels[chidx++];
                        qflags = le32_to_cpu(qchan->flags);
 
@@ -1948,21 +2033,17 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
 
        cmd = (struct qlink_cmd_change_sta *)cmd_skb->data;
        ether_addr_copy(cmd->sta_addr, mac);
+       cmd->flag_update.mask =
+               cpu_to_le32(qtnf_encode_sta_flags(params->sta_flags_mask));
+       cmd->flag_update.value =
+               cpu_to_le32(qtnf_encode_sta_flags(params->sta_flags_set));
 
        switch (vif->wdev.iftype) {
        case NL80211_IFTYPE_AP:
                cmd->if_type = cpu_to_le16(QLINK_IFTYPE_AP);
-               cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags(
-                                                 params->sta_flags_mask));
-               cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags(
-                                                params->sta_flags_set));
                break;
        case NL80211_IFTYPE_STATION:
                cmd->if_type = cpu_to_le16(QLINK_IFTYPE_STATION);
-               cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags(
-                                                 params->sta_flags_mask));
-               cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags(
-                                                params->sta_flags_set));
                break;
        default:
                pr_err("unsupported iftype %d\n", vif->wdev.iftype);
@@ -2037,8 +2118,8 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
        qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
        qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
        qchan->hdr.len = cpu_to_le16(sizeof(*qchan) - sizeof(qchan->hdr));
-       qchan->center_freq = cpu_to_le16(sc->center_freq);
-       qchan->hw_value = cpu_to_le16(sc->hw_value);
+       qchan->chan.center_freq = cpu_to_le16(sc->center_freq);
+       qchan->chan.hw_value = cpu_to_le16(sc->hw_value);
 
        if (sc->flags & IEEE80211_CHAN_NO_IR)
                flags |= QLINK_CHAN_NO_IR;
@@ -2046,7 +2127,7 @@ static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
        if (sc->flags & IEEE80211_CHAN_RADAR)
                flags |= QLINK_CHAN_RADAR;
 
-       qchan->flags = cpu_to_le32(flags);
+       qchan->chan.flags = cpu_to_le32(flags);
 }
 
 int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
@@ -2512,3 +2593,81 @@ out:
        consume_skb(resp_skb);
        return ret;
 }
+
+int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
+                      const struct cfg80211_chan_def *chdef,
+                      u32 cac_time_ms)
+{
+       struct qtnf_bus *bus = vif->mac->bus;
+       struct sk_buff *cmd_skb;
+       struct qlink_cmd_start_cac *cmd;
+       int ret;
+       u16 res_code;
+
+       cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+                                           QLINK_CMD_START_CAC,
+                                           sizeof(*cmd));
+       if (unlikely(!cmd_skb))
+               return -ENOMEM;
+
+       cmd = (struct qlink_cmd_start_cac *)cmd_skb->data;
+       cmd->cac_time_ms = cpu_to_le32(cac_time_ms);
+       qlink_chandef_cfg2q(chdef, &cmd->chan);
+
+       qtnf_bus_lock(bus);
+       ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+       qtnf_bus_unlock(bus);
+
+       if (ret)
+               return ret;
+
+       switch (res_code) {
+       case QLINK_CMD_RESULT_OK:
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       return ret;
+}
+
+int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
+                        const struct cfg80211_acl_data *params)
+{
+       struct qtnf_bus *bus = vif->mac->bus;
+       struct sk_buff *cmd_skb;
+       struct qlink_cmd_set_mac_acl *cmd;
+       u16 res_code;
+       int ret;
+
+       cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+                                           QLINK_CMD_SET_MAC_ACL,
+                                           sizeof(*cmd) +
+                                           qtnf_cmd_acl_data_size(params));
+       if (unlikely(!cmd_skb))
+               return -ENOMEM;
+
+       cmd = (struct qlink_cmd_set_mac_acl *)cmd_skb->data;
+       qlink_acl_data_cfg2q(params, &cmd->acl);
+
+       qtnf_bus_lock(bus);
+       ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
+       qtnf_bus_unlock(bus);
+
+       if (unlikely(ret))
+               return ret;
+
+       switch (res_code) {
+       case QLINK_CMD_RESULT_OK:
+               break;
+       case QLINK_CMD_RESULT_INVALID:
+               ret = -EINVAL;
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       return ret;
+}
index d981a76..69a7d56 100644 (file)
@@ -76,5 +76,10 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
 int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
                              struct cfg80211_csa_settings *params);
 int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef);
+int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
+                      const struct cfg80211_chan_def *chdef,
+                      u32 cac_time_ms);
+int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
+                        const struct cfg80211_acl_data *params);
 
 #endif /* QLINK_COMMANDS_H_ */
index 3423dc5..ccd982b 100644 (file)
@@ -258,7 +258,7 @@ static void qtnf_mac_init_primary_intf(struct qtnf_wmac *mac)
 {
        struct qtnf_vif *vif = &mac->iflist[QTNF_PRIMARY_VIF_IDX];
 
-       vif->wdev.iftype = NL80211_IFTYPE_AP;
+       vif->wdev.iftype = NL80211_IFTYPE_STATION;
        vif->bss_priority = QTNF_DEF_BSS_PRIORITY;
        vif->wdev.wiphy = priv_to_wiphy(mac);
        INIT_WORK(&vif->reset_work, qtnf_vif_reset_handler);
@@ -298,8 +298,7 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
 }
 
 int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
-                        const char *name, unsigned char name_assign_type,
-                        enum nl80211_iftype iftype)
+                        const char *name, unsigned char name_assign_type)
 {
        struct wiphy *wiphy = priv_to_wiphy(mac);
        struct net_device *dev;
@@ -320,7 +319,6 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
        dev->needs_free_netdev = true;
        dev_net_set(dev, wiphy_net(wiphy));
        dev->ieee80211_ptr = &vif->wdev;
-       dev->ieee80211_ptr->iftype = iftype;
        ether_addr_copy(dev->dev_addr, vif->mac_addr);
        SET_NETDEV_DEV(dev, wiphy_dev(wiphy));
        dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
@@ -383,6 +381,8 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
        }
 
        kfree(mac->macinfo.limits);
+       kfree(mac->macinfo.extended_capabilities);
+       kfree(mac->macinfo.extended_capabilities_mask);
        kfree(wiphy->iface_combinations);
        wiphy_free(wiphy);
        bus->mac[macid] = NULL;
@@ -418,7 +418,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
                goto error;
        }
 
-       ret = qtnf_cmd_send_add_intf(vif, NL80211_IFTYPE_AP, vif->mac_addr);
+       ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype, vif->mac_addr);
        if (ret) {
                pr_err("MAC%u: failed to add VIF\n", macid);
                goto error;
@@ -446,8 +446,7 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid)
 
        rtnl_lock();
 
-       ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM,
-                                  NL80211_IFTYPE_AP);
+       ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM);
        rtnl_unlock();
 
        if (ret) {
index 1b7bc03..c109001 100644 (file)
@@ -88,6 +88,7 @@ struct qtnf_vif {
        struct work_struct reset_work;
        struct qtnf_sta_list sta_list;
        unsigned long cons_tx_timeout_cnt;
+       int generation;
 };
 
 struct qtnf_mac_info {
@@ -102,10 +103,14 @@ struct qtnf_mac_info {
        u8 sretry_limit;
        u8 coverage_class;
        u8 radar_detect_widths;
+       u32 max_acl_mac_addrs;
        struct ieee80211_ht_cap ht_cap_mod_mask;
        struct ieee80211_vht_cap vht_cap_mod_mask;
        struct ieee80211_iface_limit *limits;
        size_t n_limits;
+       u8 *extended_capabilities;
+       u8 *extended_capabilities_mask;
+       u8 extended_capabilities_len;
 };
 
 struct qtnf_chan_stats {
@@ -144,8 +149,7 @@ struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
 struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
 struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
 int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
-                        const char *name, unsigned char name_assign_type,
-                        enum nl80211_iftype iftype);
+                        const char *name, unsigned char name_assign_type);
 void qtnf_main_work_queue(struct work_struct *work);
 int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed);
 int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
index 4abc6d9..8a3d2b1 100644 (file)
@@ -59,10 +59,11 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
        pr_debug("VIF%u.%u: MAC:%pM FC:%x\n", mac->macid, vif->vifid, sta_addr,
                 frame_control);
 
-       qtnf_sta_list_add(&vif->sta_list, sta_addr);
+       qtnf_sta_list_add(vif, sta_addr);
 
        sinfo.assoc_req_ies = NULL;
        sinfo.assoc_req_ies_len = 0;
+       sinfo.generation = vif->generation;
 
        payload_len = len - sizeof(*sta_assoc);
        tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies;
@@ -132,7 +133,7 @@ qtnf_event_handle_sta_deauth(struct qtnf_wmac *mac, struct qtnf_vif *vif,
        pr_debug("VIF%u.%u: MAC:%pM reason:%x\n", mac->macid, vif->vifid,
                 sta_addr, reason);
 
-       if (qtnf_sta_list_del(&vif->sta_list, sta_addr))
+       if (qtnf_sta_list_del(vif, sta_addr))
                cfg80211_del_sta(vif->netdev, sta_deauth->sta_addr,
                                 GFP_KERNEL);
 
@@ -237,9 +238,8 @@ qtnf_event_handle_mgmt_received(struct qtnf_vif *vif,
        pr_debug("%s LEN:%u FC:%.4X SA:%pM\n", vif->netdev->name, frame_len,
                 le16_to_cpu(frame->frame_control), frame->addr2);
 
-       cfg80211_rx_mgmt(&vif->wdev, le32_to_cpu(rxmgmt->freq),
-                        le32_to_cpu(rxmgmt->sig_dbm), rxmgmt->frame_data,
-                        frame_len, flags);
+       cfg80211_rx_mgmt(&vif->wdev, le32_to_cpu(rxmgmt->freq), rxmgmt->sig_dbm,
+                        rxmgmt->frame_data, frame_len, flags);
 
        return 0;
 }
@@ -324,7 +324,7 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
                                  sr->bssid, get_unaligned_le64(&sr->tsf),
                                  le16_to_cpu(sr->capab),
                                  le16_to_cpu(sr->bintval), ies, ies_len,
-                                 sr->signal, GFP_KERNEL);
+                                 DBM_TO_MBM(sr->sig_dbm), GFP_KERNEL);
        if (!bss)
                return -ENOMEM;
 
@@ -369,7 +369,8 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
        qlink_chandef_q2cfg(wiphy, &data->chan, &chandef);
 
        if (!cfg80211_chandef_valid(&chandef)) {
-               pr_err("MAC%u: bad channel f1=%u f2=%u bw=%u\n", mac->macid,
+               pr_err("MAC%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n",
+                      mac->macid, chandef.chan->center_freq,
                       chandef.center_freq1, chandef.center_freq2,
                       chandef.width);
                return -EINVAL;
@@ -394,6 +395,63 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
        return 0;
 }
 
+static int qtnf_event_handle_radar(struct qtnf_vif *vif,
+                                  const struct qlink_event_radar *ev,
+                                  u16 len)
+{
+       struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+       struct cfg80211_chan_def chandef;
+
+       if (len < sizeof(*ev)) {
+               pr_err("MAC%u: payload is too short\n", vif->mac->macid);
+               return -EINVAL;
+       }
+
+       if (!wiphy->registered || !vif->netdev)
+               return 0;
+
+       qlink_chandef_q2cfg(wiphy, &ev->chan, &chandef);
+
+       if (!cfg80211_chandef_valid(&chandef)) {
+               pr_err("MAC%u: bad channel f1=%u f2=%u bw=%u\n",
+                      vif->mac->macid,
+                      chandef.center_freq1, chandef.center_freq2,
+                      chandef.width);
+               return -EINVAL;
+       }
+
+       pr_info("%s: radar event=%u f1=%u f2=%u bw=%u\n",
+               vif->netdev->name, ev->event,
+               chandef.center_freq1, chandef.center_freq2,
+               chandef.width);
+
+       switch (ev->event) {
+       case QLINK_RADAR_DETECTED:
+               cfg80211_radar_event(wiphy, &chandef, GFP_KERNEL);
+               break;
+       case QLINK_RADAR_CAC_FINISHED:
+               if (!vif->wdev.cac_started)
+                       break;
+
+               cfg80211_cac_event(vif->netdev, &chandef,
+                                  NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
+               break;
+       case QLINK_RADAR_CAC_ABORTED:
+               if (!vif->wdev.cac_started)
+                       break;
+
+               cfg80211_cac_event(vif->netdev, &chandef,
+                                  NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+               break;
+       default:
+               pr_warn("%s: unhandled radar event %u\n",
+                       vif->netdev->name, ev->event);
+               break;
+       }
+
+       return 0;
+}
+
 static int qtnf_event_parse(struct qtnf_wmac *mac,
                            const struct sk_buff *event_skb)
 {
@@ -448,6 +506,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac,
                ret = qtnf_event_handle_freq_change(mac, (const void *)event,
                                                    event_len);
                break;
+       case QLINK_EVENT_RADAR:
+               ret = qtnf_event_handle_radar(vif, (const void *)event,
+                                             event_len);
+               break;
        default:
                pr_warn("unknown event type: %x\n", event_id);
                break;
index a432fb0..6a1f960 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <linux/ieee80211.h>
 
-#define QLINK_PROTO_VER                6
+#define QLINK_PROTO_VER                10
 
 #define QLINK_MACID_RSVD               0xFF
 #define QLINK_VIFID_RSVD               0xFF
@@ -122,17 +122,49 @@ enum qlink_channel_width {
 };
 
 /**
+ * struct qlink_channel - qlink control channel definition
+ *
+ * @hw_value: hardware-specific value for the channel
+ * @center_freq: center frequency in MHz
+ * @flags: channel flags from &enum qlink_channel_flags
+ * @band: band this channel belongs to
+ * @max_antenna_gain: maximum antenna gain in dBi
+ * @max_power: maximum transmission power (in dBm)
+ * @max_reg_power: maximum regulatory transmission power (in dBm)
+ * @dfs_state: current state of this channel.
+ *      Only relevant if radar is required on this channel.
+ * @beacon_found: helper to regulatory code to indicate when a beacon
+ *     has been found on this channel. Use regulatory_hint_found_beacon()
+ *     to enable this, this is useful only on 5 GHz band.
+ */
+struct qlink_channel {
+       __le16 hw_value;
+       __le16 center_freq;
+       __le32 flags;
+       u8 band;
+       u8 max_antenna_gain;
+       u8 max_power;
+       u8 max_reg_power;
+       __le32 dfs_cac_ms;
+       u8 dfs_state;
+       u8 beacon_found;
+       u8 rsvd[2];
+} __packed;
+
+/**
  * struct qlink_chandef - qlink channel definition
  *
+ * @chan: primary channel definition
  * @center_freq1: center frequency of first segment
  * @center_freq2: center frequency of second segment (80+80 only)
  * @width: channel width, one of @enum qlink_channel_width
  */
 struct qlink_chandef {
+       struct qlink_channel chan;
        __le16 center_freq1;
        __le16 center_freq2;
        u8 width;
-       u8 rsvd[3];
+       u8 rsvd;
 } __packed;
 
 #define QLINK_MAX_NR_CIPHER_SUITES            5
@@ -153,6 +185,17 @@ struct qlink_auth_encr {
        u8 rsvd[2];
 } __packed;
 
+/**
+ * struct qlink_sta_info_state - station flags mask/value
+ *
+ * @mask: STA flags mask, bitmap of &enum qlink_sta_flags
+ * @value: STA flags values, bitmap of &enum qlink_sta_flags
+ */
+struct qlink_sta_info_state {
+       __le32 mask;
+       __le32 value;
+} __packed;
+
 /* QLINK Command messages related definitions
  */
 
@@ -173,6 +216,7 @@ struct qlink_auth_encr {
  * @QLINK_CMD_REG_NOTIFY: notify device about regulatory domain change. This
  *     command is supported only if device reports QLINK_HW_SUPPORTS_REG_UPDATE
  *     capability.
+ * @QLINK_CMD_START_CAC: start radar detection procedure on a specified channel.
  */
 enum qlink_cmd_type {
        QLINK_CMD_FW_INIT               = 0x0001,
@@ -192,8 +236,10 @@ enum qlink_cmd_type {
        QLINK_CMD_BAND_INFO_GET         = 0x001A,
        QLINK_CMD_CHAN_SWITCH           = 0x001B,
        QLINK_CMD_CHAN_GET              = 0x001C,
+       QLINK_CMD_START_CAC             = 0x001D,
        QLINK_CMD_START_AP              = 0x0021,
        QLINK_CMD_STOP_AP               = 0x0022,
+       QLINK_CMD_SET_MAC_ACL           = 0x0023,
        QLINK_CMD_GET_STA_INFO          = 0x0030,
        QLINK_CMD_ADD_KEY               = 0x0040,
        QLINK_CMD_DEL_KEY               = 0x0041,
@@ -368,16 +414,14 @@ struct qlink_cmd_set_def_mgmt_key {
 /**
  * struct qlink_cmd_change_sta - data for QLINK_CMD_CHANGE_STA command
  *
- * @sta_flags_mask: STA flags mask, bitmap of &enum qlink_sta_flags
- * @sta_flags_set: STA flags values, bitmap of &enum qlink_sta_flags
+ * @flag_update: STA flags to update
  * @if_type: Mode of interface operation, one of &enum qlink_iface_type
  * @vlanid: VLAN ID to assign to specific STA
  * @sta_addr: address of the STA for which parameters are set.
  */
 struct qlink_cmd_change_sta {
        struct qlink_cmd chdr;
-       __le32 sta_flags_mask;
-       __le32 sta_flags_set;
+       struct qlink_sta_info_state flag_update;
        __le16 if_type;
        __le16 vlanid;
        u8 sta_addr[ETH_ALEN];
@@ -585,6 +629,50 @@ struct qlink_cmd_start_ap {
        u8 info[0];
 } __packed;
 
+/**
+ * struct qlink_cmd_start_cac - data for QLINK_CMD_START_CAC command
+ *
+ * @chan: a channel to start a radar detection procedure on.
+ * @cac_time_ms: CAC time.
+ */
+struct qlink_cmd_start_cac {
+       struct qlink_cmd chdr;
+       struct qlink_chandef chan;
+       __le32 cac_time_ms;
+} __packed;
+
+enum qlink_acl_policy {
+       QLINK_ACL_POLICY_ACCEPT_UNLESS_LISTED,
+       QLINK_ACL_POLICY_DENY_UNLESS_LISTED,
+};
+
+struct qlink_mac_address {
+       u8 addr[ETH_ALEN];
+} __packed;
+
+/**
+ * struct qlink_acl_data - ACL data
+ *
+ * @policy: filter policy, one of &enum qlink_acl_policy.
+ * @num_entries: number of MAC addresses in array.
+ * @mac_address: MAC addresses array.
+ */
+struct qlink_acl_data {
+       __le32 policy;
+       __le32 num_entries;
+       struct qlink_mac_address mac_addrs[0];
+} __packed;
+
+/**
+ * struct qlink_cmd_set_mac_acl - data for QLINK_CMD_SET_MAC_ACL command
+ *
+ * @acl: ACL data.
+ */
+struct qlink_cmd_set_mac_acl {
+       struct qlink_cmd chdr;
+       struct qlink_acl_data acl;
+} __packed;
+
 /* QLINK Command Responses messages related definitions
  */
 
@@ -646,6 +734,7 @@ struct qlink_resp_get_mac_info {
        struct ieee80211_ht_cap ht_cap_mod_mask;
        __le16 max_ap_assoc_sta;
        __le16 radar_detect_widths;
+       __le32 max_acl_mac_addrs;
        u8 bands_cap;
        u8 rsvd[1];
        u8 var_info[0];
@@ -709,17 +798,27 @@ struct qlink_resp_manage_intf {
        struct qlink_intf_info intf_info;
 } __packed;
 
+enum qlink_sta_info_rate_flags {
+       QLINK_STA_INFO_RATE_FLAG_HT_MCS         = BIT(0),
+       QLINK_STA_INFO_RATE_FLAG_VHT_MCS        = BIT(1),
+       QLINK_STA_INFO_RATE_FLAG_SHORT_GI       = BIT(2),
+       QLINK_STA_INFO_RATE_FLAG_60G            = BIT(3),
+};
+
 /**
  * struct qlink_resp_get_sta_info - response for QLINK_CMD_GET_STA_INFO command
  *
  * Response data containing statistics for specified STA.
  *
+ * Validity of individual statistics is indicated by a bitmap of
+ *     &enum qlink_sta_info carried in a QTN_TLV_ID_STA_STATS_MAP TLV in @info.
  * @sta_addr: MAC address of STA the response carries statistic for.
- * @info: statistics for specified STA.
+ * @info: variable statistics for specified STA.
  */
 struct qlink_resp_get_sta_info {
        struct qlink_resp rhdr;
        u8 sta_addr[ETH_ALEN];
+       u8 rsvd[2];
        u8 info[0];
 } __packed;
 
@@ -782,6 +881,7 @@ enum qlink_event_type {
        QLINK_EVENT_BSS_JOIN            = 0x0026,
        QLINK_EVENT_BSS_LEAVE           = 0x0027,
        QLINK_EVENT_FREQ_CHANGE         = 0x0028,
+       QLINK_EVENT_RADAR               = 0x0029,
 };
 
 /**
@@ -869,15 +969,16 @@ enum qlink_rxmgmt_flags {
  * struct qlink_event_rxmgmt - data for QLINK_EVENT_MGMT_RECEIVED event
  *
  * @freq: Frequency on which the frame was received in MHz.
- * @sig_dbm: signal strength in dBm.
  * @flags: bitmap of &enum qlink_rxmgmt_flags.
+ * @sig_dbm: signal strength in dBm.
  * @frame_data: data of Rx'd frame itself.
  */
 struct qlink_event_rxmgmt {
        struct qlink_event ehdr;
        __le32 freq;
-       __le32 sig_dbm;
        __le32 flags;
+       s8 sig_dbm;
+       u8 rsvd[3];
        u8 frame_data[0];
 } __packed;
 
@@ -889,7 +990,7 @@ struct qlink_event_rxmgmt {
  *     event was generated was discovered.
  * @capab: capabilities field.
  * @bintval: beacon interval announced by discovered BSS.
- * @signal: signal strength.
+ * @sig_dbm: signal strength in dBm.
  * @bssid: BSSID announced by discovered BSS.
  * @ssid_len: length of SSID announced by BSS.
  * @ssid: SSID announced by discovered BSS.
@@ -901,7 +1002,7 @@ struct qlink_event_scan_result {
        __le16 freq;
        __le16 capab;
        __le16 bintval;
-       s8 signal;
+       s8 sig_dbm;
        u8 ssid_len;
        u8 ssid[IEEE80211_MAX_SSID_LEN];
        u8 bssid[ETH_ALEN];
@@ -931,9 +1032,39 @@ struct qlink_event_scan_complete {
        __le32 flags;
 } __packed;
 
+enum qlink_radar_event {
+       QLINK_RADAR_DETECTED,
+       QLINK_RADAR_CAC_FINISHED,
+       QLINK_RADAR_CAC_ABORTED,
+       QLINK_RADAR_NOP_FINISHED,
+       QLINK_RADAR_PRE_CAC_EXPIRED,
+};
+
+/**
+ * struct qlink_event_radar - data for QLINK_EVENT_RADAR event
+ *
+ * @chan: channel on which radar event happened.
+ * @event: radar event type, one of &enum qlink_radar_event.
+ */
+struct qlink_event_radar {
+       struct qlink_event ehdr;
+       struct qlink_chandef chan;
+       u8 event;
+       u8 rsvd[3];
+} __packed;
+
 /* QLINK TLVs (Type-Length Values) definitions
  */
 
+/**
+ * enum qlink_tlv_id - list of TLVs that Qlink messages can carry
+ *
+ * @QTN_TLV_ID_STA_STATS_MAP: a bitmap of &enum qlink_sta_info, used to
+ *     indicate which statistic carried in QTN_TLV_ID_STA_STATS is valid.
+ * @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
+ *     &struct qlink_sta_stats. Valid values are marked as such in a bitmap
+ *     carried by QTN_TLV_ID_STA_STATS_MAP.
+ */
 enum qlink_tlv_id {
        QTN_TLV_ID_FRAG_THRESH          = 0x0201,
        QTN_TLV_ID_RTS_THRESH           = 0x0202,
@@ -942,15 +1073,17 @@ enum qlink_tlv_id {
        QTN_TLV_ID_REG_RULE             = 0x0207,
        QTN_TLV_ID_CHANNEL              = 0x020F,
        QTN_TLV_ID_CHANDEF              = 0x0210,
+       QTN_TLV_ID_STA_STATS_MAP        = 0x0211,
+       QTN_TLV_ID_STA_STATS            = 0x0212,
        QTN_TLV_ID_COVERAGE_CLASS       = 0x0213,
        QTN_TLV_ID_IFACE_LIMIT          = 0x0214,
        QTN_TLV_ID_NUM_IFACE_COMB       = 0x0215,
        QTN_TLV_ID_CHANNEL_STATS        = 0x0216,
-       QTN_TLV_ID_STA_BASIC_COUNTERS   = 0x0300,
-       QTN_TLV_ID_STA_GENERIC_INFO     = 0x0301,
        QTN_TLV_ID_KEY                  = 0x0302,
        QTN_TLV_ID_SEQ                  = 0x0303,
        QTN_TLV_ID_IE_SET               = 0x0305,
+       QTN_TLV_ID_EXT_CAPABILITY_MASK  = 0x0306,
+       QTN_TLV_ID_ACL_DATA             = 0x0307,
 };
 
 struct qlink_tlv_hdr {
@@ -968,67 +1101,8 @@ struct qlink_iface_comb_num {
        __le16 iface_comb_num;
 } __packed;
 
-struct qlink_sta_stat_basic_counters {
-       __le64 rx_bytes;
-       __le64 tx_bytes;
-       __le64 rx_beacons;
-       __le32 rx_packets;
-       __le32 tx_packets;
-       __le32 rx_dropped;
-       __le32 tx_failed;
-} __packed;
-
-enum qlink_sta_info_rate_flags {
-       QLINK_STA_INFO_RATE_FLAG_INVALID        = 0,
-       QLINK_STA_INFO_RATE_FLAG_HT_MCS         = BIT(0),
-       QLINK_STA_INFO_RATE_FLAG_VHT_MCS        = BIT(1),
-       QLINK_STA_INFO_RATE_FLAG_SHORT_GI       = BIT(2),
-       QLINK_STA_INFO_RATE_FLAG_60G            = BIT(3),
-};
-
-enum qlink_sta_info_rate_bw {
-       QLINK_STA_INFO_RATE_BW_5                = 0,
-       QLINK_STA_INFO_RATE_BW_10               = 1,
-       QLINK_STA_INFO_RATE_BW_20               = 2,
-       QLINK_STA_INFO_RATE_BW_40               = 3,
-       QLINK_STA_INFO_RATE_BW_80               = 4,
-       QLINK_STA_INFO_RATE_BW_160              = 5,
-};
-
-/**
- * struct qlink_sta_info_rate - STA rate statistics
- *
- * @rate: data rate in Mbps.
- * @flags: bitmap of &enum qlink_sta_flags.
- * @mcs: 802.11-defined MCS index.
- * nss: Number of Spatial Streams.
- * @bw: bandwidth, one of &enum qlink_sta_info_rate_bw.
- */
-struct qlink_sta_info_rate {
-       __le16 rate;
-       u8 flags;
-       u8 mcs;
-       u8 nss;
-       u8 bw;
-} __packed;
-
-struct qlink_sta_info_state {
-       __le32 mask;
-       __le32 value;
-} __packed;
-
 #define QLINK_RSSI_OFFSET      120
 
-struct qlink_sta_info_generic {
-       struct qlink_sta_info_state state;
-       __le32 connected_time;
-       __le32 inactive_time;
-       struct qlink_sta_info_rate rx_rate;
-       struct qlink_sta_info_rate tx_rate;
-       u8 rssi;
-       u8 rssi_avg;
-} __packed;
-
 struct qlink_tlv_frag_rts_thr {
        struct qlink_tlv_hdr hdr;
        __le16 thr;
@@ -1113,19 +1187,16 @@ enum qlink_dfs_state {
        QLINK_DFS_AVAILABLE,
 };
 
+/**
+ * struct qlink_tlv_channel - data for QTN_TLV_ID_CHANNEL TLV
+ *
+ * Channel settings.
+ *
+ * @chan: ieee80211 channel settings.
+ */
 struct qlink_tlv_channel {
        struct qlink_tlv_hdr hdr;
-       __le16 hw_value;
-       __le16 center_freq;
-       __le32 flags;
-       u8 band;
-       u8 max_antenna_gain;
-       u8 max_power;
-       u8 max_reg_power;
-       __le32 dfs_cac_ms;
-       u8 dfs_state;
-       u8 beacon_found;
-       u8 rsvd[2];
+       struct qlink_channel chan;
 } __packed;
 
 /**
@@ -1137,7 +1208,7 @@ struct qlink_tlv_channel {
  */
 struct qlink_tlv_chandef {
        struct qlink_tlv_hdr hdr;
-       struct qlink_chandef chan;
+       struct qlink_chandef chdef;
 } __packed;
 
 enum qlink_ie_set_type {
@@ -1176,4 +1247,105 @@ struct qlink_chan_stats {
        s8 chan_noise;
 } __packed;
 
+/**
+ * enum qlink_sta_info - station information bitmap
+ *
+ * Used to indicate which statistics values in &struct qlink_sta_stats
+ * are valid. Individual values are used to fill a bitmap carried in a
+ * payload of QTN_TLV_ID_STA_STATS_MAP.
+ *
+ * @QLINK_STA_INFO_CONNECTED_TIME: connected_time value is valid.
+ * @QLINK_STA_INFO_INACTIVE_TIME: inactive_time value is valid.
+ * @QLINK_STA_INFO_RX_BYTES: lower 32 bits of rx_bytes value are valid.
+ * @QLINK_STA_INFO_TX_BYTES: lower 32 bits of tx_bytes value are valid.
+ * @QLINK_STA_INFO_RX_BYTES64: rx_bytes value is valid.
+ * @QLINK_STA_INFO_TX_BYTES64: tx_bytes value is valid.
+ * @QLINK_STA_INFO_RX_DROP_MISC: rx_dropped_misc value is valid.
+ * @QLINK_STA_INFO_BEACON_RX: rx_beacon value is valid.
+ * @QLINK_STA_INFO_SIGNAL: signal value is valid.
+ * @QLINK_STA_INFO_SIGNAL_AVG: signal_avg value is valid.
+ * @QLINK_STA_INFO_RX_BITRATE: rxrate value is valid.
+ * @QLINK_STA_INFO_TX_BITRATE: txrate value is valid.
+ * @QLINK_STA_INFO_RX_PACKETS: rx_packets value is valid.
+ * @QLINK_STA_INFO_TX_PACKETS: tx_packets value is valid.
+ * @QLINK_STA_INFO_TX_RETRIES: tx_retries value is valid.
+ * @QLINK_STA_INFO_TX_FAILED: tx_failed value is valid.
+ * @QLINK_STA_INFO_STA_FLAGS: sta_flags value is valid.
+ */
+enum qlink_sta_info {
+       QLINK_STA_INFO_CONNECTED_TIME,
+       QLINK_STA_INFO_INACTIVE_TIME,
+       QLINK_STA_INFO_RX_BYTES,
+       QLINK_STA_INFO_TX_BYTES,
+       QLINK_STA_INFO_RX_BYTES64,
+       QLINK_STA_INFO_TX_BYTES64,
+       QLINK_STA_INFO_RX_DROP_MISC,
+       QLINK_STA_INFO_BEACON_RX,
+       QLINK_STA_INFO_SIGNAL,
+       QLINK_STA_INFO_SIGNAL_AVG,
+       QLINK_STA_INFO_RX_BITRATE,
+       QLINK_STA_INFO_TX_BITRATE,
+       QLINK_STA_INFO_RX_PACKETS,
+       QLINK_STA_INFO_TX_PACKETS,
+       QLINK_STA_INFO_TX_RETRIES,
+       QLINK_STA_INFO_TX_FAILED,
+       QLINK_STA_INFO_STA_FLAGS,
+       QLINK_STA_INFO_NUM,
+};
+
+/**
+ * struct qlink_sta_info_rate - STA rate statistics
+ *
+ * @rate: data rate in Mbps.
+ * @flags: bitmap of &enum qlink_sta_info_rate_flags.
+ * @mcs: 802.11-defined MCS index.
+ * @nss: Number of Spatial Streams.
+ * @bw: bandwidth, one of &enum qlink_channel_width.
+ */
+struct qlink_sta_info_rate {
+       __le16 rate;
+       u8 flags;
+       u8 mcs;
+       u8 nss;
+       u8 bw;
+} __packed;
+
+/**
+ * struct qlink_sta_stats - data for QTN_TLV_ID_STA_STATS
+ *
+ * Carries statistics of a STA. Not all fields may be filled with
+ * valid values. Valid fields should be indicated as such using a bitmap of
+ * &enum qlink_sta_info. Bitmap is carried separately in a payload of
+ * QTN_TLV_ID_STA_STATS_MAP.
+ */
+struct qlink_sta_stats {
+       __le64 rx_bytes;
+       __le64 tx_bytes;
+       __le64 rx_beacon;
+       __le64 rx_duration;
+       __le64 t_offset;
+       __le32 connected_time;
+       __le32 inactive_time;
+       __le32 rx_packets;
+       __le32 tx_packets;
+       __le32 tx_retries;
+       __le32 tx_failed;
+       __le32 rx_dropped_misc;
+       __le32 beacon_loss_count;
+       __le32 expected_throughput;
+       struct qlink_sta_info_state sta_flags;
+       struct qlink_sta_info_rate txrate;
+       struct qlink_sta_info_rate rxrate;
+       __le16 llid;
+       __le16 plid;
+       u8 local_pm;
+       u8 peer_pm;
+       u8 nonpeer_pm;
+       u8 rx_beacon_signal_avg;
+       u8 plink_state;
+       u8 signal;
+       u8 signal_avg;
+       u8 rsvd[1];
+};
+
 #endif /* _QTN_QLINK_H_ */
index 61d999a..aeeda81 100644 (file)
@@ -100,34 +100,6 @@ static enum nl80211_chan_width qlink_chanwidth_to_nl(u8 qlw)
        }
 }
 
-void qlink_chandef_q2cfg(struct wiphy *wiphy,
-                        const struct qlink_chandef *qch,
-                        struct cfg80211_chan_def *chdef)
-{
-       chdef->center_freq1 = le16_to_cpu(qch->center_freq1);
-       chdef->center_freq2 = le16_to_cpu(qch->center_freq2);
-       chdef->width = qlink_chanwidth_to_nl(qch->width);
-
-       switch (chdef->width) {
-       case NL80211_CHAN_WIDTH_20_NOHT:
-       case NL80211_CHAN_WIDTH_20:
-       case NL80211_CHAN_WIDTH_5:
-       case NL80211_CHAN_WIDTH_10:
-               chdef->chan = ieee80211_get_channel(wiphy, chdef->center_freq1);
-               break;
-       case NL80211_CHAN_WIDTH_40:
-       case NL80211_CHAN_WIDTH_80:
-       case NL80211_CHAN_WIDTH_80P80:
-       case NL80211_CHAN_WIDTH_160:
-               chdef->chan = ieee80211_get_channel(wiphy,
-                                                   chdef->center_freq1 - 10);
-               break;
-       default:
-               chdef->chan = NULL;
-               break;
-       }
-}
-
 static u8 qlink_chanwidth_nl_to_qlink(enum nl80211_chan_width nlwidth)
 {
        switch (nlwidth) {
@@ -152,9 +124,29 @@ static u8 qlink_chanwidth_nl_to_qlink(enum nl80211_chan_width nlwidth)
        }
 }
 
+void qlink_chandef_q2cfg(struct wiphy *wiphy,
+                        const struct qlink_chandef *qch,
+                        struct cfg80211_chan_def *chdef)
+{
+       struct ieee80211_channel *chan;
+
+       chan = ieee80211_get_channel(wiphy, le16_to_cpu(qch->chan.center_freq));
+
+       chdef->chan = chan;
+       chdef->center_freq1 = le16_to_cpu(qch->center_freq1);
+       chdef->center_freq2 = le16_to_cpu(qch->center_freq2);
+       chdef->width = qlink_chanwidth_to_nl(qch->width);
+}
+
 void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
                         struct qlink_chandef *qch)
 {
+       struct ieee80211_channel *chan = chdef->chan;
+
+       qch->chan.hw_value = cpu_to_le16(chan->hw_value);
+       qch->chan.center_freq = cpu_to_le16(chan->center_freq);
+       qch->chan.flags = cpu_to_le32(chan->flags);
+
        qch->center_freq1 = cpu_to_le16(chdef->center_freq1);
        qch->center_freq2 = cpu_to_le16(chdef->center_freq2);
        qch->width = qlink_chanwidth_nl_to_qlink(chdef->width);
@@ -172,3 +164,33 @@ enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val)
                return QLINK_HIDDEN_SSID_NOT_IN_USE;
        }
 }
+
+bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit,
+                          unsigned int arr_max_len)
+{
+       unsigned int idx = bit / BITS_PER_BYTE;
+       u8 mask = 1 << (bit - (idx * BITS_PER_BYTE));
+
+       if (idx >= arr_max_len)
+               return false;
+
+       return arr[idx] & mask;
+}
+
+void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
+                         struct qlink_acl_data *qacl)
+{
+       switch (acl->acl_policy) {
+       case NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED:
+               qacl->policy =
+                       cpu_to_le32(QLINK_ACL_POLICY_ACCEPT_UNLESS_LISTED);
+               break;
+       case NL80211_ACL_POLICY_DENY_UNLESS_LISTED:
+               qacl->policy = cpu_to_le32(QLINK_ACL_POLICY_DENY_UNLESS_LISTED);
+               break;
+       }
+
+       qacl->num_entries = cpu_to_le32(acl->n_acl_entries);
+       memcpy(qacl->mac_addrs, acl->mac_addrs,
+              acl->n_acl_entries * sizeof(*qacl->mac_addrs));
+}
index 260383d..54caeb3 100644 (file)
@@ -69,5 +69,9 @@ void qlink_chandef_q2cfg(struct wiphy *wiphy,
 void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
                         struct qlink_chandef *qch);
 enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val);
+bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit,
+                          unsigned int arr_max_len);
+void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
+                         struct qlink_acl_data *qacl);
 
 #endif /* _QTN_FMAC_QLINK_UTIL_H_ */
index ed38e87..e745733 100644 (file)
@@ -57,9 +57,10 @@ struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list,
        return NULL;
 }
 
-struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
+struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_vif *vif,
                                        const u8 *mac)
 {
+       struct qtnf_sta_list *list = &vif->sta_list;
        struct qtnf_sta_node *node;
 
        if (unlikely(!mac))
@@ -77,13 +78,15 @@ struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
        ether_addr_copy(node->mac_addr, mac);
        list_add_tail(&node->list, &list->head);
        atomic_inc(&list->size);
+       ++vif->generation;
 
 done:
        return node;
 }
 
-bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac)
+bool qtnf_sta_list_del(struct qtnf_vif *vif, const u8 *mac)
 {
+       struct qtnf_sta_list *list = &vif->sta_list;
        struct qtnf_sta_node *node;
        bool ret = false;
 
@@ -93,6 +96,7 @@ bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac)
                list_del(&node->list);
                atomic_dec(&list->size);
                kfree(node);
+               ++vif->generation;
                ret = true;
        }
 
index 0359eae..0d4d92b 100644 (file)
@@ -26,9 +26,9 @@ struct qtnf_sta_node *qtnf_sta_list_lookup(struct qtnf_sta_list *list,
                                           const u8 *mac);
 struct qtnf_sta_node *qtnf_sta_list_lookup_index(struct qtnf_sta_list *list,
                                                 size_t index);
-struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_sta_list *list,
+struct qtnf_sta_node *qtnf_sta_list_add(struct qtnf_vif *vif,
                                        const u8 *mac);
-bool qtnf_sta_list_del(struct qtnf_sta_list *list, const u8 *mac);
+bool qtnf_sta_list_del(struct qtnf_vif *vif, const u8 *mac);
 
 void qtnf_sta_list_free(struct qtnf_sta_list *list);
 
index d2c2894..429d07b 100644 (file)
@@ -4903,7 +4903,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
                min_sleep = 2000;
                break;
        default:
-               WARN_ONCE(1, "Not supported RF chipet %x for VCO recalibration",
+               WARN_ONCE(1, "Not supported RF chipset %x for VCO recalibration",
                          rt2x00dev->chip.rf);
                return;
        }
index ecc9631..a971bc7 100644 (file)
@@ -142,32 +142,28 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
        if (!rt2x00dev->ops->hw->set_rts_threshold &&
            (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
                                                IEEE80211_TX_RC_USE_CTS_PROTECT))) {
-               if (rt2x00queue_available(queue) <= 1)
-                       goto exit_fail;
+               if (rt2x00queue_available(queue) <= 1) {
+                       /*
+                        * Recheck for full queue under lock to avoid race
+                        * conditions with rt2x00lib_txdone().
+                        */
+                       spin_lock(&queue->tx_lock);
+                       if (rt2x00queue_threshold(queue))
+                               rt2x00queue_pause_queue(queue);
+                       spin_unlock(&queue->tx_lock);
+
+                       goto exit_free_skb;
+               }
 
                if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
-                       goto exit_fail;
+                       goto exit_free_skb;
        }
 
        if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false)))
-               goto exit_fail;
-
-       /*
-        * Pausing queue has to be serialized with rt2x00lib_txdone(). Note
-        * we should not use spin_lock_bh variant as bottom halve was already
-        * disabled before ieee80211_xmit() call.
-        */
-       spin_lock(&queue->tx_lock);
-       if (rt2x00queue_threshold(queue))
-               rt2x00queue_pause_queue(queue);
-       spin_unlock(&queue->tx_lock);
+               goto exit_free_skb;
 
        return;
 
- exit_fail:
-       spin_lock(&queue->tx_lock);
-       rt2x00queue_pause_queue(queue);
-       spin_unlock(&queue->tx_lock);
  exit_free_skb:
        ieee80211_free_txskb(hw, skb);
 }
index a2c1ca5..a6884e7 100644 (file)
@@ -715,6 +715,14 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
        rt2x00queue_kick_tx_queue(queue, &txdesc);
 
 out:
+       /*
+        * Pausing queue has to be serialized with rt2x00lib_txdone(), so we
+        * do this under queue->tx_lock. Bottom half was already disabled
+        * before ieee80211_xmit() call.
+        */
+       if (rt2x00queue_threshold(queue))
+               rt2x00queue_pause_queue(queue);
+
        spin_unlock(&queue->tx_lock);
        return ret;
 }
@@ -1244,10 +1252,8 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
        rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
 
        queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
-       if (!queue) {
-               rt2x00_err(rt2x00dev, "Queue allocation failed\n");
+       if (!queue)
                return -ENOMEM;
-       }
 
        /*
         * Initialize pointers
index 704741d..0ba9c0c 100644 (file)
@@ -859,8 +859,8 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
        struct rtl_phy *rtlphy = &rtlpriv->phy;
        u8 hw_rate;
 
-       if ((get_rf_type(rtlphy) == RF_2T2R) &&
-           (sta->ht_cap.mcs.rx_mask[1] != 0))
+       if (get_rf_type(rtlphy) == RF_2T2R &&
+           sta->ht_cap.mcs.rx_mask[1] != 0)
                hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
        else
                hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
@@ -1180,7 +1180,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
                                tcb_desc->hw_rate =
                                _rtl_get_vht_highest_n_rate(hw, sta);
                        } else {
-                               if (sta && (sta->ht_cap.ht_supported)) {
+                               if (sta && sta->ht_cap.ht_supported) {
                                        tcb_desc->hw_rate =
                                                _rtl_get_highest_n_rate(hw, sta);
                                } else {
@@ -1321,6 +1321,10 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                                  le16_to_cpu(mgmt->u.action.u.addba_req.capab);
                                tid = (capab &
                                       IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+                               if (tid >= MAX_TID_COUNT) {
+                                       rcu_read_unlock();
+                                       return true;
+                               }
                                tid_data = &sta_entry->tids[tid];
                                if (tid_data->agg.rx_agg_state ==
                                    RTL_RX_AGG_START)
@@ -1972,9 +1976,9 @@ void rtl_watchdog_wq_callback(void *data)
                    rtlpriv->btcoexist.btc_ops->btc_is_bt_ctrl_lps(rtlpriv))
                        goto label_lps_done;
 
-               if (((rtlpriv->link_info.num_rx_inperiod +
-                     rtlpriv->link_info.num_tx_inperiod) > 8) ||
-                   (rtlpriv->link_info.num_rx_inperiod > 2))
+               if (rtlpriv->link_info.num_rx_inperiod +
+                     rtlpriv->link_info.num_tx_inperiod > 8 ||
+                   rtlpriv->link_info.num_rx_inperiod > 2)
                        rtl_lps_leave(hw);
                else
                        rtl_lps_enter(hw);
@@ -2225,9 +2229,7 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
        case IEEE80211_SMPS_AUTOMATIC:/* 0 */
        case IEEE80211_SMPS_NUM_MODES:/* 4 */
                WARN_ON(1);
-       /* Here will get a 'MISSING_BREAK' in Coverity Test, just ignore it.
-        * According to Kernel Code, here is right.
-        */
+       /* fall through */
        case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
                action_frame->u.action.u.ht_smps.smps_control =
                                WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
@@ -2528,6 +2530,9 @@ static int __init rtl_core_module_init(void)
        if (rtl_rate_control_register())
                pr_err("rtl: Unable to register rtl_rc, use default RC !!\n");
 
+       /* add debugfs */
+       rtl_debugfs_add_topdir();
+
        /* init some global vars */
        INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
        spin_lock_init(&rtl_global_var.glb_list_lock);
@@ -2539,6 +2544,9 @@ static void __exit rtl_core_module_exit(void)
 {
        /*RC*/
        rtl_rate_control_unregister();
+
+       /* remove debugfs */
+       rtl_debugfs_remove_topdir();
 }
 
 module_init(rtl_core_module_init);
index 44c2572..8fce371 100644 (file)
@@ -2704,11 +2704,11 @@ void ex_btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
        btc8192e2ant_init_coex_dm(btcoexist);
 }
 
-void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist,
+                                      struct seq_file *m)
 {
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
        u16 u16tmp[4];
        u32 u32tmp[4];
@@ -2719,75 +2719,64 @@ void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
        u8 wifi_dot11_chnl, wifi_hs_chnl;
        u32 fw_ver = 0, bt_patch_ver = 0;
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n ============[BT Coexist info]============");
+       seq_puts(m, "\n ============[BT Coexist info]============");
 
        if (btcoexist->manual_control) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ===========[Under Manual Control]===========");
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ==========================================");
-       }
-
-       if (!board_info->bt_exist) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
-               return;
+               seq_puts(m, "\n ===========[Under Manual Control]===========");
+               seq_puts(m, "\n ==========================================");
        }
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+       seq_printf(m, "\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
                   board_info->pg_ant_num, board_info->btdm_ant_num);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
-                "BT stack/ hci ext ver",
+       seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
                   ((stack_info->profile_notified) ? "Yes" : "No"),
                   stack_info->hci_version);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
-                "CoexVer/ FwVer/ PatchVer",
-                glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
-                fw_ver, bt_patch_ver, bt_patch_ver);
+       seq_printf(m, "\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+                  "CoexVer/ FwVer/ PatchVer",
+                  glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+                  fw_ver, bt_patch_ver, bt_patch_ver);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
                           &wifi_dot11_chnl);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
-                "Dot11 channel / HsMode(HsChnl)",
-                wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl);
+       seq_printf(m, "\n %-35s = %d / %d(%d)",
+                  "Dot11 channel / HsMode(HsChnl)",
+                  wifi_dot11_chnl, bt_hs_on, wifi_hs_chnl);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
-                "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+       seq_printf(m, "\n %-35s = %3ph ",
+                  "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
 
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+       seq_printf(m, "\n %-35s = %d/ %d",
+                  "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-                "Wifi link/ roam/ scan", link, roam, scan);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+                  "Wifi link/ roam/ scan", link, roam, scan);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
                           &wifi_traffic_dir);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
-                "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+       seq_printf(m, "\n %-35s = %s / %s/ %s ",
+                  "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
                   ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
                        (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
                   ((!wifi_busy) ? "idle" :
                        ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
                                "uplink" : "downlink")));
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
-                "BT [status/ rssi/ retryCnt]",
+       seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+                  "BT [status/ rssi/ retryCnt]",
                   ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
                    ((coex_sta->c2h_bt_inquiry_page) ?
                     ("inquiry/page scan") :
@@ -2797,131 +2786,129 @@ void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
                           coex_dm->bt_status) ? "connected-idle" : "busy")))),
                   coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
-                "SCO/HID/PAN/A2DP", stack_info->sco_exist,
+       seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+                  "SCO/HID/PAN/A2DP", stack_info->sco_exist,
                   stack_info->hid_exist, stack_info->pan_exist,
                   stack_info->a2dp_exist);
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
 
        bt_info_ext = coex_sta->bt_info_ext;
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-                "BT Info A2DP rate",
+       seq_printf(m, "\n %-35s = %s",
+                  "BT Info A2DP rate",
                   (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
 
        for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) {
                if (coex_sta->bt_info_c2h_cnt[i]) {
-                       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                                "\r\n %-35s = %7ph(%d)",
-                                glbt_info_src_8192e_2ant[i],
-                                coex_sta->bt_info_c2h[i],
-                                coex_sta->bt_info_c2h_cnt[i]);
+                       seq_printf(m, "\n %-35s = %7ph(%d)",
+                                  glbt_info_src_8192e_2ant[i],
+                                  coex_sta->bt_info_c2h[i],
+                                  coex_sta->bt_info_c2h_cnt[i]);
                }
        }
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
-                "PS state, IPS/LPS",
-                ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-                ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+       seq_printf(m, "\n %-35s = %s/%s",
+                  "PS state, IPS/LPS",
+                  ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+                  ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type",
-                coex_dm->cur_ss_type);
+       seq_printf(m, "\n %-35s = 0x%x ", "SS Type",
+                  coex_dm->cur_ss_type);
 
        /* Sw mechanism */
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                "============[Sw mechanism]============");
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-                "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
-                coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
-                "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
-                coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
-                coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "Rate Mask",
-                btcoexist->bt_info.ra_mask);
+       seq_printf(m, "\n %-35s",
+                  "============[Sw mechanism]============");
+       seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+                  "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+                  coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+                  "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+                  coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+                  coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+
+       seq_printf(m, "\n %-35s = 0x%x ", "Rate Mask",
+                  btcoexist->bt_info.ra_mask);
 
        /* Fw mechanism */
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                "============[Fw mechanism]============");
+       seq_printf(m, "\n %-35s",
+                  "============[Fw mechanism]============");
 
        ps_tdma_case = coex_dm->cur_ps_tdma;
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %5ph case-%d (auto:%d)",
-                "PS TDMA", coex_dm->ps_tdma_para,
-                ps_tdma_case, coex_dm->auto_tdma_adjust);
+       seq_printf(m,
+                  "\n %-35s = %5ph case-%d (auto:%d)",
+                  "PS TDMA", coex_dm->ps_tdma_para,
+                  ps_tdma_case, coex_dm->auto_tdma_adjust);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
-                "DecBtPwr/ IgnWlanAct",
-                coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+       seq_printf(m, "\n %-35s = %d/ %d ",
+                  "DecBtPwr/ IgnWlanAct",
+                  coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
 
        /* Hw setting */
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                "============[Hw setting]============");
+       seq_printf(m, "\n %-35s",
+                  "============[Hw setting]============");
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
-                "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+       seq_printf(m, "\n %-35s = 0x%x",
+                  "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
-                coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
-                coex_dm->backup_ampdu_maxtime);
+       seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+                  "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+                  coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
+                  coex_dm->backup_ampdu_maxtime);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
        u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                "0x430/0x434/0x42a/0x456",
-                u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+                  "0x430/0x434/0x42a/0x456",
+                  u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0x778",
-                u8tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x", "0x778", u8tmp[0]);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
        u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0xc50(dig)",
-                u32tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x", "0xc50(dig)",
+                  u32tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-                "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
-                u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                "0x770(hp rx[31:16]/tx[15:0])",
-                coex_sta->high_priority_rx, coex_sta->high_priority_tx);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                "0x774(lp rx[31:16]/tx[15:0])",
-                coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+       seq_printf(m,
+                  "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                  "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+                  u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+
+       seq_printf(m, "\n %-35s = %d/ %d",
+                  "0x770(hp rx[31:16]/tx[15:0])",
+                  coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+       seq_printf(m, "\n %-35s = %d/ %d",
+                  "0x774(lp rx[31:16]/tx[15:0])",
+                  coex_sta->low_priority_rx, coex_sta->low_priority_tx);
        if (btcoexist->auto_report_2ant)
                btc8192e2ant_monitor_bt_ctr(btcoexist);
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
 }
 
 void ex_btc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
index 65502ac..b8c95c7 100644 (file)
@@ -180,4 +180,5 @@ void ex_btc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
                                            u8 type);
 void ex_btc8192e2ant_halt_notify(struct btc_coexist *btcoexist);
 void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8192e2ant_display_coex_info(struct btc_coexist *btcoexist,
+                                      struct seq_file *m);
index 5f726f6..fd3b1fb 100644 (file)
@@ -2474,12 +2474,12 @@ void ex_btc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
        halbtc8723b1ant_query_bt_info(btcoexist);
 }
 
-void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist,
+                                      struct seq_file *m)
 {
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 u8tmp[4], i, bt_info_ext, pstdmacase = 0;
        u16 u16tmp[4];
        u32 u32tmp[4];
@@ -2491,62 +2491,56 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
        u8 wifi_dot11_chnl, wifi_hs_chnl;
        u32 fw_ver = 0, bt_patch_ver = 0;
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n ============[BT Coexist info]============");
+       seq_puts(m, "\n ============[BT Coexist info]============");
 
        if (btcoexist->manual_control) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ============[Under Manual Control]==========");
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ==========================================");
+               seq_puts(m, "\n ============[Under Manual Control]==========");
+               seq_puts(m, "\n ==========================================");
        }
        if (btcoexist->stop_coex_dm) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ============[Coex is STOPPED]============");
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ==========================================");
+               seq_puts(m, "\n ============[Coex is STOPPED]============");
+               seq_puts(m, "\n ==========================================");
        }
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
-                "Ant PG Num/ Ant Mech/ Ant Pos:",
-                board_info->pg_ant_num, board_info->btdm_ant_num,
-                board_info->btdm_ant_pos);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d",
+                  "Ant PG Num/ Ant Mech/ Ant Pos:",
+                  board_info->pg_ant_num, board_info->btdm_ant_num,
+                  board_info->btdm_ant_pos);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
-                "BT stack/ hci ext ver",
-                ((stack_info->profile_notified) ? "Yes" : "No"),
-                stack_info->hci_version);
+       seq_printf(m, "\n %-35s = %s / %d",
+                  "BT stack/ hci ext ver",
+                  ((stack_info->profile_notified) ? "Yes" : "No"),
+                  stack_info->hci_version);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
-                "CoexVer/ FwVer/ PatchVer",
-                glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
-                fw_ver, bt_patch_ver, bt_patch_ver);
+       seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+                  "CoexVer/ FwVer/ PatchVer",
+                  glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
+                  fw_ver, bt_patch_ver, bt_patch_ver);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
                           &wifi_dot11_chnl);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
-                "Dot11 channel / HsChnl(HsMode)",
-                wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+       seq_printf(m, "\n %-35s = %d / %d(%d)",
+                  "Dot11 channel / HsChnl(HsMode)",
+                  wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
-                "H2C Wifi inform bt chnl Info",
-                coex_dm->wifi_chnl_info);
+       seq_printf(m, "\n %-35s = %3ph ",
+                  "H2C Wifi inform bt chnl Info",
+                  coex_dm->wifi_chnl_info);
 
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+       seq_printf(m, "\n %-35s = %d/ %d",
+                  "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-                "Wifi link/ roam/ scan", link, roam, scan);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+                  "Wifi link/ roam/ scan", link, roam, scan);
 
        btcoexist->btc_get(btcoexist , BTC_GET_BL_WIFI_UNDER_5G,
                           &wifi_under_5g);
@@ -2555,110 +2549,106 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
                           &wifi_traffic_dir);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
-                "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
-                ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
-                 ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")),
-                 ((!wifi_busy) ? "idle" :
-                  ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
-                  "uplink" : "downlink")));
+       seq_printf(m, "\n %-35s = %s / %s/ %s ",
+                  "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+                  ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+                   ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")),
+                   ((!wifi_busy) ? "idle" :
+                    ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+                    "uplink" : "downlink")));
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
                           &wifi_link_status);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d/ %d/ %d",
-                "sta/vwifi/hs/p2pGo/p2pGc",
-                ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
-                ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
-                ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
-                ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
-                ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
-                "BT [status/ rssi/ retryCnt]",
-                ((coex_sta->bt_disabled) ? ("disabled") :
-                 ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
-                  ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
-                    coex_dm->bt_status) ?
-                   "non-connected idle" :
-                   ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
-                     coex_dm->bt_status) ?
-                    "connected-idle" : "busy")))),
-                    coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d / %d / %d / %d",
-                "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
-                bt_link_info->hid_exist, bt_link_info->pan_exist,
-                bt_link_info->a2dp_exist);
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d/ %d/ %d",
+                  "sta/vwifi/hs/p2pGo/p2pGc",
+                  ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
+                  ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
+                  ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
+                  ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
+                  ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
+
+       seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+                  "BT [status/ rssi/ retryCnt]",
+                  ((coex_sta->bt_disabled) ? ("disabled") :
+                   ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
+                    ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+                      coex_dm->bt_status) ?
+                     "non-connected idle" :
+                     ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+                       coex_dm->bt_status) ?
+                      "connected-idle" : "busy")))),
+                      coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
+
+       seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+                  "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
+                  bt_link_info->hid_exist, bt_link_info->pan_exist,
+                  bt_link_info->a2dp_exist);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
 
        bt_info_ext = coex_sta->bt_info_ext;
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-                "BT Info A2DP rate",
-                (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+       seq_printf(m, "\n %-35s = %s",
+                  "BT Info A2DP rate",
+                  (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
 
        for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) {
                if (coex_sta->bt_info_c2h_cnt[i]) {
-                       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                                "\r\n %-35s = %7ph(%d)",
-                                glbt_info_src_8723b_1ant[i],
-                                coex_sta->bt_info_c2h[i],
-                                coex_sta->bt_info_c2h_cnt[i]);
+                       seq_printf(m, "\n %-35s = %7ph(%d)",
+                                  glbt_info_src_8723b_1ant[i],
+                                  coex_sta->bt_info_c2h[i],
+                                  coex_sta->bt_info_c2h_cnt[i]);
                }
        }
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %s/%s, (0x%x/0x%x)",
-                "PS state, IPS/LPS, (lps/rpwm)",
-                ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-                ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
-                btcoexist->bt_info.lps_val,
-                btcoexist->bt_info.rpwm_val);
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+       seq_printf(m, "\n %-35s = %s/%s, (0x%x/0x%x)",
+                  "PS state, IPS/LPS, (lps/rpwm)",
+                  ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+                  ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
+                  btcoexist->bt_info.lps_val,
+                  btcoexist->bt_info.rpwm_val);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
 
        if (!btcoexist->manual_control) {
                /* Sw mechanism */
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                        "============[Sw mechanism]============");
+               seq_printf(m, "\n %-35s",
+                          "============[Sw mechanism]============");
 
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/",
-                        "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
+               seq_printf(m, "\n %-35s = %d/",
+                          "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
 
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/ %s/ %d ",
-                        "DelBA/ BtCtrlAgg/ AggSize",
+               seq_printf(m, "\n %-35s = %s/ %s/ %d ",
+                          "DelBA/ BtCtrlAgg/ AggSize",
                           (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
                           (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
                           btcoexist->bt_info.agg_buf_size);
 
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
-                        "Rate Mask", btcoexist->bt_info.ra_mask);
+               seq_printf(m, "\n %-35s = 0x%x ",
+                          "Rate Mask", btcoexist->bt_info.ra_mask);
 
                /* Fw mechanism */
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                        "============[Fw mechanism]============");
+               seq_printf(m, "\n %-35s",
+                          "============[Fw mechanism]============");
 
                pstdmacase = coex_dm->cur_ps_tdma;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s = %5ph case-%d (auto:%d)",
+               seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
                           "PS TDMA", coex_dm->ps_tdma_para,
                           pstdmacase, coex_dm->auto_tdma_adjust);
 
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d ",
-                        "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
+               seq_printf(m, "\n %-35s = %d ",
+                          "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
 
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
-                        "Latest error condition(should be 0)",
+               seq_printf(m, "\n %-35s = 0x%x ",
+                          "Latest error condition(should be 0)",
                           coex_dm->error_condition);
        }
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d",
-                "Coex Table Type", coex_sta->coex_table_type);
+       seq_printf(m, "\n %-35s = %d",
+                  "Coex Table Type", coex_sta->coex_table_type);
 
        /* Hw setting */
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                "============[Hw setting]============");
+       seq_printf(m, "\n %-35s",
+                  "============[Hw setting]============");
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+       seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+                  "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
                   coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
                   coex_dm->backup_ampdu_max_time);
 
@@ -2666,50 +2656,49 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
        u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                "0x430/0x434/0x42a/0x456",
-                u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+                  "0x430/0x434/0x42a/0x456",
+                  u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
-                (u32tmp[1] & 0x3e000000) >> 25);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
+                  (u32tmp[1] & 0x3e000000) >> 25);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
        u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "0x948/ 0x67[5] / 0x765",
-                u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0x948/ 0x67[5] / 0x765",
+                  u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
-                u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+                  u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
        u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
        u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-                "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
-                ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
-                 ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                  "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+                  ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+                   ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -2727,26 +2716,26 @@ void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
                   (u32tmp[3] & 0xffff);
        fa_cck = (u8tmp[0] << 8) + u8tmp[1];
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "OFDM-CCA/OFDM-FA/CCK-FA",
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "OFDM-CCA/OFDM-FA/CCK-FA",
                 u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "0x6c0/0x6c4/0x6c8(coexTable)",
-                u32tmp[0], u32tmp[1], u32tmp[2]);
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
-                coex_sta->high_priority_tx);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
-                coex_sta->low_priority_tx);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0x6c0/0x6c4/0x6c8(coexTable)",
+                  u32tmp[0], u32tmp[1], u32tmp[2]);
+
+       seq_printf(m, "\n %-35s = %d/ %d",
+                  "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
+                  coex_sta->high_priority_tx);
+       seq_printf(m, "\n %-35s = %d/ %d",
+                  "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+                  coex_sta->low_priority_tx);
        if (btcoexist->auto_report_1ant)
                halbtc8723b1ant_monitor_bt_ctr(btcoexist);
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
 }
 
 void ex_btc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
index 8d4fde2..934f278 100644 (file)
@@ -220,5 +220,6 @@ void ex_btc8723b1ant_halt_notify(struct btc_coexist *btcoexist);
 void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate);
 void ex_btc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist);
 void ex_btc8723b1ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist,
+                                      struct seq_file *m);
 void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
index e8f0757..4907c2f 100644 (file)
@@ -3780,12 +3780,12 @@ void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
        btc8723b2ant_init_coex_dm(btcoexist);
 }
 
-void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist,
+                                      struct seq_file *m)
 {
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
        u32 u32tmp[4];
        bool roam = false, scan = false;
@@ -3797,173 +3797,161 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
        u32 fw_ver = 0, bt_patch_ver = 0;
        u8 ap_num = 0;
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n ============[BT Coexist info]============");
+       seq_puts(m, "\n ============[BT Coexist info]============");
 
        if (btcoexist->manual_control) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ==========[Under Manual Control]============");
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ==========================================");
-       }
-
-       if (!board_info->bt_exist) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
-               return;
+               seq_puts(m, "\n ==========[Under Manual Control]============");
+               seq_puts(m, "\n ==========================================");
        }
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
-                "Ant PG number/ Ant mechanism:",
-                board_info->pg_ant_num, board_info->btdm_ant_num);
+       seq_printf(m, "\n %-35s = %d/ %d ",
+                  "Ant PG number/ Ant mechanism:",
+                  board_info->pg_ant_num, board_info->btdm_ant_num);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
-                "BT stack/ hci ext ver",
-                ((stack_info->profile_notified) ? "Yes" : "No"),
-                stack_info->hci_version);
+       seq_printf(m, "\n %-35s = %s / %d",
+                  "BT stack/ hci ext ver",
+                  ((stack_info->profile_notified) ? "Yes" : "No"),
+                  stack_info->hci_version);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
-                "CoexVer/ FwVer/ PatchVer",
-                glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
-                fw_ver, bt_patch_ver, bt_patch_ver);
+       seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+                  "CoexVer/ FwVer/ PatchVer",
+                  glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+                  fw_ver, bt_patch_ver, bt_patch_ver);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
                           &wifi_dot11_chnl);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
-                "Dot11 channel / HsChnl(HsMode)",
-                wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+       seq_printf(m, "\n %-35s = %d / %d(%d)",
+                  "Dot11 channel / HsChnl(HsMode)",
+                  wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
-                "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+       seq_printf(m, "\n %-35s = %3ph ",
+                  "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
 
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
-                "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d",
+                  "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-                "Wifi link/ roam/ scan", link, roam, scan);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+                  "Wifi link/ roam/ scan", link, roam, scan);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
                           &wifi_traffic_dir);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
-                "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+       seq_printf(m, "\n %-35s = %s / %s/ %s ",
+                  "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
                 ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
                 (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
                 ((!wifi_busy) ? "idle" :
                 ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
                  "uplink" : "downlink")));
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
-                "SCO/HID/PAN/A2DP",
-                bt_link_info->sco_exist, bt_link_info->hid_exist,
-                bt_link_info->pan_exist, bt_link_info->a2dp_exist);
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+       seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+                  "SCO/HID/PAN/A2DP",
+                  bt_link_info->sco_exist, bt_link_info->hid_exist,
+                  bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
 
        bt_info_ext = coex_sta->bt_info_ext;
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-                "BT Info A2DP rate",
-                (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+       seq_printf(m, "\n %-35s = %s",
+                  "BT Info A2DP rate",
+                  (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
 
        for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
                if (coex_sta->bt_info_c2h_cnt[i]) {
-                       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                                "\r\n %-35s = %7ph(%d)",
-                                glbt_info_src_8723b_2ant[i],
-                                coex_sta->bt_info_c2h[i],
-                                coex_sta->bt_info_c2h_cnt[i]);
+                       seq_printf(m, "\n %-35s = %7ph(%d)",
+                                  glbt_info_src_8723b_2ant[i],
+                                  coex_sta->bt_info_c2h[i],
+                                  coex_sta->bt_info_c2h_cnt[i]);
                }
        }
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
-                "PS state, IPS/LPS",
-                ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-                ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+       seq_printf(m, "\n %-35s = %s/%s",
+                  "PS state, IPS/LPS",
+                  ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+                  ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
 
        /* Sw mechanism */
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s", "============[Sw mechanism]============");
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-                "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
-                coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
-                "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
-                coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
-                coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+       seq_printf(m,
+                  "\n %-35s", "============[Sw mechanism]============");
+       seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+                  "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+                  coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+                  "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+                  coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+                  coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
 
        /* Fw mechanism */
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                "============[Fw mechanism]============");
+       seq_printf(m, "\n %-35s",
+                  "============[Fw mechanism]============");
 
        ps_tdma_case = coex_dm->cur_ps_tdma;
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %5ph case-%d (auto:%d)",
-                "PS TDMA", coex_dm->ps_tdma_para,
-                ps_tdma_case, coex_dm->auto_tdma_adjust);
+       seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
+                  "PS TDMA", coex_dm->ps_tdma_para,
+                  ps_tdma_case, coex_dm->auto_tdma_adjust);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
-                "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl,
-                coex_dm->cur_ignore_wlan_act);
+       seq_printf(m, "\n %-35s = %d/ %d ",
+                  "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr_lvl,
+                  coex_dm->cur_ignore_wlan_act);
 
        /* Hw setting */
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                "============[Hw setting]============");
+       seq_printf(m, "\n %-35s",
+                  "============[Hw setting]============");
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
-                "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+       seq_printf(m, "\n %-35s = 0x%x",
+                  "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0x778/0x880[29:25]", u8tmp[0],
-                (u32tmp[0] & 0x3e000000) >> 25);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0x778/0x880[29:25]", u8tmp[0],
+                  (u32tmp[0] & 0x3e000000) >> 25);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
        u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "0x948/ 0x67[5] / 0x765",
-                u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0x948/ 0x67[5] / 0x765",
+                  u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
-                u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+                  u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
        u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
        u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-                "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
-                ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
-                ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                  "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+                  ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+                  ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -3981,29 +3969,27 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
                   (u32tmp[3] & 0xffff);
        fa_cck = (u8tmp[0] << 8) + u8tmp[1];
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "OFDM-CCA/OFDM-FA/CCK-FA",
-                u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "OFDM-CCA/OFDM-FA/CCK-FA",
+                  u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-                "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
-                u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                "0x770(high-pri rx/tx)",
-                coex_sta->high_priority_rx, coex_sta->high_priority_tx);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
-                coex_sta->low_priority_tx);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                  "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+                  u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+
+       seq_printf(m, "\n %-35s = %d/ %d",
+                  "0x770(high-pri rx/tx)",
+                  coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+       seq_printf(m, "\n %-35s = %d/ %d",
+                  "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+                  coex_sta->low_priority_tx);
        if (btcoexist->auto_report_2ant)
                btc8723b2ant_monitor_bt_ctr(btcoexist);
-       btcoexist->btc_disp_dbg_msg(btcoexist,
-       BTC_DBG_DISP_COEX_STATISTICS);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
 }
 
 void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
index bc1e304..aa24da4 100644 (file)
@@ -195,7 +195,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
                                    u8 *tmpbuf, u8 length);
 void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
 void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist,
+                                      struct seq_file *m);
 void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
 void ex_btc8723b2ant_pre_load_firmware(struct btc_coexist *btcoexist);
 void ex_btc8723b2ant_power_on_setting(struct btc_coexist *btcoexist);
index 4efac5f..0b26419 100644 (file)
@@ -2172,12 +2172,12 @@ void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
        btc8821a1ant_query_bt_info(btcoexist);
 }
 
-void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist,
+                                      struct seq_file *m)
 {
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 u1_tmp[4], i, bt_info_ext, ps_tdma_case = 0;
        u16 u2_tmp[4];
        u32 u4_tmp[4];
@@ -2188,49 +2188,36 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
        u8 wifi_dot11_chnl, wifi_hs_chnl;
        u32 fw_ver = 0, bt_patch_ver = 0;
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n ============[BT Coexist info]============");
+       seq_puts(m, "\n ============[BT Coexist info]============");
 
        if (btcoexist->manual_control) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ============[Under Manual Control]============");
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ==========================================");
+               seq_puts(m, "\n ============[Under Manual Control]============");
+               seq_puts(m, "\n ==========================================");
        }
        if (btcoexist->stop_coex_dm) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ============[Coex is STOPPED]============");
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n ==========================================");
-       }
-
-       if (!board_info->bt_exist) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
-               return;
+               seq_puts(m, "\n ============[Coex is STOPPED]============");
+               seq_puts(m, "\n ==========================================");
        }
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d/ %d/ %d",
-                "Ant PG Num/ Ant Mech/ Ant Pos:",
-                board_info->pg_ant_num,
-                board_info->btdm_ant_num,
-                board_info->btdm_ant_pos);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d",
+                  "Ant PG Num/ Ant Mech/ Ant Pos:",
+                  board_info->pg_ant_num,
+                  board_info->btdm_ant_num,
+                  board_info->btdm_ant_pos);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
-                ((stack_info->profile_notified) ? "Yes" : "No"),
-                stack_info->hci_version);
+       seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
+                  ((stack_info->profile_notified) ? "Yes" : "No"),
+                  stack_info->hci_version);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                           &bt_patch_ver);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
-                "CoexVer/ FwVer/ PatchVer",
-                glcoex_ver_date_8821a_1ant,
-                glcoex_ver_8821a_1ant,
-                fw_ver, bt_patch_ver,
-                bt_patch_ver);
+       seq_printf(m, "\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+                  "CoexVer/ FwVer/ PatchVer",
+                  glcoex_ver_date_8821a_1ant,
+                  glcoex_ver_8821a_1ant,
+                  fw_ver, bt_patch_ver,
+                  bt_patch_ver);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION,
                           &bt_hs_on);
@@ -2238,28 +2225,24 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
                           &wifi_dot11_chnl);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL,
                           &wifi_hs_chnl);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d / %d(%d)",
-                "Dot11 channel / HsChnl(HsMode)",
-                wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+       seq_printf(m, "\n %-35s = %d / %d(%d)",
+                  "Dot11 channel / HsChnl(HsMode)",
+                  wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %3ph ",
-                "H2C Wifi inform bt chnl Info",
-                coex_dm->wifi_chnl_info);
+       seq_printf(m, "\n %-35s = %3ph ",
+                  "H2C Wifi inform bt chnl Info",
+                  coex_dm->wifi_chnl_info);
 
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
-                (int)wifi_rssi, (int)bt_hs_rssi);
+       seq_printf(m, "\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
+                  (int)wifi_rssi, (int)bt_hs_rssi);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
-                link, roam, scan);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+                  link, roam, scan);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
                           &wifi_under_5g);
@@ -2269,16 +2252,15 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
                           &wifi_busy);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
                           &wifi_traffic_dir);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %s / %s/ %s ", "Wifi status",
-                (wifi_under_5g ? "5G" : "2.4G"),
-                ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
-                (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
-                ((!wifi_busy) ? "idle" :
-                ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
-                "uplink" : "downlink")));
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
+       seq_printf(m, "\n %-35s = %s / %s/ %s ", "Wifi status",
+                  (wifi_under_5g ? "5G" : "2.4G"),
+                  ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+                  (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
+                  ((!wifi_busy) ? "idle" :
+                  ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+                  "uplink" : "downlink")));
+       seq_printf(m, "\n %-35s = [%s/ %d/ %d] ",
+                  "BT [status/ rssi/ retryCnt]",
                   ((coex_sta->bt_disabled) ? ("disabled") :
                   ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
                   ((BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
@@ -2289,166 +2271,143 @@ void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
                   "connected-idle" : "busy")))),
                   coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
-                bt_link_info->sco_exist,
-                bt_link_info->hid_exist,
-                bt_link_info->pan_exist,
-                bt_link_info->a2dp_exist);
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+       seq_printf(m, "\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+                  bt_link_info->sco_exist,
+                  bt_link_info->hid_exist,
+                  bt_link_info->pan_exist,
+                  bt_link_info->a2dp_exist);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO, m);
 
        bt_info_ext = coex_sta->bt_info_ext;
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %s",
-                "BT Info A2DP rate",
-                (bt_info_ext & BIT0) ?
-                "Basic rate" : "EDR rate");
+       seq_printf(m, "\n %-35s = %s",
+                  "BT Info A2DP rate",
+                  (bt_info_ext & BIT0) ?
+                  "Basic rate" : "EDR rate");
 
        for (i = 0; i < BT_INFO_SRC_8821A_1ANT_MAX; i++) {
                if (coex_sta->bt_info_c2h_cnt[i]) {
-                       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                                "\r\n %-35s = %7ph(%d)",
-                                glbt_info_src_8821a_1ant[i],
-                                coex_sta->bt_info_c2h[i],
-                                coex_sta->bt_info_c2h_cnt[i]);
+                       seq_printf(m, "\n %-35s = %7ph(%d)",
+                                  glbt_info_src_8821a_1ant[i],
+                                  coex_sta->bt_info_c2h[i],
+                                  coex_sta->bt_info_c2h_cnt[i]);
                }
        }
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %s/%s, (0x%x/0x%x)",
-                "PS state, IPS/LPS, (lps/rpwm)",
-                ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-                ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
-                btcoexist->bt_info.lps_val,
-                btcoexist->bt_info.rpwm_val);
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+       seq_printf(m, "\n %-35s = %s/%s, (0x%x/0x%x)",
+                  "PS state, IPS/LPS, (lps/rpwm)",
+                  ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+                  ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
+                  btcoexist->bt_info.lps_val,
+                  btcoexist->bt_info.rpwm_val);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
 
        if (!btcoexist->manual_control) {
                /* Sw mechanism*/
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s",
-                        "============[Sw mechanism]============");
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s = %d", "SM[LowPenaltyRA]",
-                        coex_dm->cur_low_penalty_ra);
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s = %s/ %s/ %d ",
-                        "DelBA/ BtCtrlAgg/ AggSize",
-                        (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
-                        (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
-                        btcoexist->bt_info.agg_buf_size);
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s = 0x%x ", "Rate Mask",
-                        btcoexist->bt_info.ra_mask);
+               seq_printf(m, "\n %-35s",
+                          "============[Sw mechanism]============");
+
+               seq_printf(m, "\n %-35s = %d", "SM[LowPenaltyRA]",
+                          coex_dm->cur_low_penalty_ra);
+
+               seq_printf(m, "\n %-35s = %s/ %s/ %d ",
+                          "DelBA/ BtCtrlAgg/ AggSize",
+                          (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
+                          (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
+                          btcoexist->bt_info.agg_buf_size);
+               seq_printf(m, "\n %-35s = 0x%x ", "Rate Mask",
+                          btcoexist->bt_info.ra_mask);
 
                /* Fw mechanism */
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                        "============[Fw mechanism]============");
+               seq_printf(m, "\n %-35s",
+                          "============[Fw mechanism]============");
 
                ps_tdma_case = coex_dm->cur_ps_tdma;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s = %5ph case-%d (auto:%d)",
-                        "PS TDMA",
-                        coex_dm->ps_tdma_para,
-                        ps_tdma_case,
-                        coex_dm->auto_tdma_adjust);
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s = 0x%x ",
-                        "Latest error condition(should be 0)",
+               seq_printf(m, "\n %-35s = %5ph case-%d (auto:%d)",
+                          "PS TDMA",
+                          coex_dm->ps_tdma_para,
+                          ps_tdma_case,
+                          coex_dm->auto_tdma_adjust);
+
+               seq_printf(m, "\n %-35s = 0x%x ",
+                          "Latest error condition(should be 0)",
                           coex_dm->error_condition);
 
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s = %d ", "IgnWlanAct",
-                        coex_dm->cur_ignore_wlan_act);
+               seq_printf(m, "\n %-35s = %d ", "IgnWlanAct",
+                          coex_dm->cur_ignore_wlan_act);
        }
 
        /* Hw setting */
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s", "============[Hw setting]============");
+       seq_printf(m, "\n %-35s", "============[Hw setting]============");
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                "backup ARFR1/ARFR2/RL/AMaxTime",
-                coex_dm->backup_arfr_cnt1,
-                coex_dm->backup_arfr_cnt2,
-                coex_dm->backup_retry_limit,
-                coex_dm->backup_ampdu_max_time);
+       seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+                  "backup ARFR1/ARFR2/RL/AMaxTime",
+                  coex_dm->backup_arfr_cnt1,
+                  coex_dm->backup_arfr_cnt2,
+                  coex_dm->backup_retry_limit,
+                  coex_dm->backup_ampdu_max_time);
 
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
        u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
        u2_tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                "0x430/0x434/0x42a/0x456",
-                u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+                  "0x430/0x434/0x42a/0x456",
+                  u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
 
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc58);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
-                u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
+                  u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25);
 
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x", "0x8db[6:5]",
-                ((u1_tmp[0] & 0x60) >> 5));
+       seq_printf(m, "\n %-35s = 0x%x", "0x8db[6:5]",
+                  ((u1_tmp[0] & 0x60) >> 5));
 
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x975);
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
-                (u4_tmp[0] & 0x30000000) >> 28,
-                 u4_tmp[0] & 0xff,
-                 u1_tmp[0] & 0x3);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
+                  (u4_tmp[0] & 0x30000000) >> 28,
+                   u4_tmp[0] & 0xff,
+                   u1_tmp[0] & 0x3);
 
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
        u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x64);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "0x40/0x4c[24:23]/0x64[0]",
-                u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23), u1_tmp[1] & 0x1);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0x40/0x4c[24:23]/0x64[0]",
+                  u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23),
+                  u1_tmp[1] & 0x1);
 
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
-                u4_tmp[0], u1_tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
+                  u4_tmp[0], u1_tmp[0]);
 
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x", "0xc50(dig)",
-                u4_tmp[0] & 0xff);
+       seq_printf(m, "\n %-35s = 0x%x", "0xc50(dig)",
+                  u4_tmp[0] & 0xff);
 
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5d);
        u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
-                u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
+                  u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]);
 
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
        u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
        u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-                "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                  "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
                   u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
-                coex_sta->high_priority_rx, coex_sta->high_priority_tx);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
-                coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+       seq_printf(m, "\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
+                  coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+       seq_printf(m, "\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
+                  coex_sta->low_priority_rx, coex_sta->low_priority_tx);
        if (btcoexist->auto_report_1ant)
                btc8821a1ant_monitor_bt_ctr(btcoexist);
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
 }
 
 void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
index b0a6626..a498ff5 100644 (file)
@@ -186,7 +186,8 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
 void ex_btc8821a1ant_halt_notify(struct btc_coexist *btcoexist);
 void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate);
 void ex_btc8821a1ant_periodical(struct btc_coexist *btcoexist);
-void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist);
+void ex_btc8821a1ant_display_coex_info(struct btc_coexist *btcoexist,
+                                      struct seq_file *m);
 void ex_btc8821a1ant_dbg_control(struct btc_coexist *btcoexist, u8 op_code,
                                 u8 op_len, u8 *data);
 void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
index 41943c3..d5f282c 100644 (file)
@@ -3657,11 +3657,11 @@ void ex_btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
        btc8821a2ant_init_coex_dm(btcoexist);
 }
 
-void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
+void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist,
+                                      struct seq_file *m)
 {
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
-       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 u1tmp[4], i, bt_info_ext, ps_tdma_case = 0;
        u32 u4tmp[4];
        bool roam = false, scan = false, link = false, wifi_under_5g = false;
@@ -3671,32 +3671,22 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
        u8 wifi_dot_11_chnl, wifi_hs_chnl;
        u32 fw_ver = 0, bt_patch_ver = 0;
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n ============[BT Coexist info]============");
-
-       if (!board_info->bt_exist) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
-               return;
-       }
+       seq_puts(m, "\n ============[BT Coexist info]============");
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
-                board_info->pg_ant_num, board_info->btdm_ant_num);
+       seq_printf(m, "\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+                  board_info->pg_ant_num, board_info->btdm_ant_num);
 
        if (btcoexist->manual_control) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s", "[Action Manual control]!!");
+               seq_printf(m, "\n %-35s", "[Action Manual control]!!");
        }
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
+       seq_printf(m, "\n %-35s = %s / %d", "BT stack/ hci ext ver",
                   ((stack_info->profile_notified) ? "Yes" : "No"),
                   stack_info->hci_version);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+       seq_printf(m, "\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
                   "CoexVer/ FwVer/ PatchVer",
                   glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
                   fw_ver, bt_patch_ver, bt_patch_ver);
@@ -3707,27 +3697,23 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
                BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_dot_11_chnl);
        btcoexist->btc_get(btcoexist,
                BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d / %d(%d)",
+       seq_printf(m, "\n %-35s = %d / %d(%d)",
                   "Dot11 channel / HsMode(HsChnl)",
                   wifi_dot_11_chnl, bt_hs_on, wifi_hs_chnl);
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %3ph ",
+       seq_printf(m, "\n %-35s = %3ph ",
                   "H2C Wifi inform bt chnl Info",
                   coex_dm->wifi_chnl_info);
 
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
+       seq_printf(m, "\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
                   wifi_rssi, bt_hs_rssi);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+       seq_printf(m, "\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
                   link, roam, scan);
 
        btcoexist->btc_get(btcoexist,
@@ -3738,8 +3724,7 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
                BTC_GET_BL_WIFI_BUSY, &wifi_busy);
        btcoexist->btc_get(btcoexist,
                BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %s / %s/ %s ", "Wifi status",
+       seq_printf(m, "\n %-35s = %s / %s/ %s ", "Wifi status",
                   (wifi_under_5g ? "5G" : "2.4G"),
                   ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
                    (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
@@ -3748,134 +3733,128 @@ void ex_btc8821a2ant_display_coex_info(struct btc_coexist *btcoexist)
                     "uplink" : "downlink")));
 
        if (stack_info->profile_notified) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+               seq_printf(m, "\n %-35s = %d / %d / %d / %d",
+                          "SCO/HID/PAN/A2DP",
                           stack_info->sco_exist, stack_info->hid_exist,
                           stack_info->pan_exist, stack_info->a2dp_exist);
 
                btcoexist->btc_disp_dbg_msg(btcoexist,
-                                           BTC_DBG_DISP_BT_LINK_INFO);
+                                           BTC_DBG_DISP_BT_LINK_INFO,
+                                           m);
        }
 
        bt_info_ext = coex_sta->bt_info_ext;
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-                "BT Info A2DP rate",
+       seq_printf(m, "\n %-35s = %s", "BT Info A2DP rate",
                   (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
 
        for (i = 0; i < BT_INFO_SRC_8821A_2ANT_MAX; i++) {
                if (coex_sta->bt_info_c2h_cnt[i]) {
-                       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                                "\r\n %-35s = %7ph(%d)",
-                                glbt_info_src_8821a_2ant[i],
-                                coex_sta->bt_info_c2h[i],
-                                coex_sta->bt_info_c2h_cnt[i]);
+                       seq_printf(m, "\n %-35s = %7ph(%d)",
+                                  glbt_info_src_8821a_2ant[i],
+                                  coex_sta->bt_info_c2h[i],
+                                  coex_sta->bt_info_c2h_cnt[i]);
                }
        }
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
-                "PS state, IPS/LPS",
-                ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-                ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+       seq_printf(m, "\n %-35s = %s/%s",
+                  "PS state, IPS/LPS",
+                  ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+                  ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD, m);
 
        /* Sw mechanism*/
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                "============[Sw mechanism]============");
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = %d/ %d/ %d(0x%x) ",
-                "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
-                coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
-                coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+       seq_printf(m, "\n %-35s",
+                  "============[Sw mechanism]============");
+       seq_printf(m, "\n %-35s = %d/ %d/ %d(0x%x) ",
+                  "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+                  coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+                  coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
 
        /* Fw mechanism*/
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                "============[Fw mechanism]============");
+       seq_printf(m, "\n %-35s",
+                  "============[Fw mechanism]============");
 
        if (!btcoexist->manual_control) {
                ps_tdma_case = coex_dm->cur_ps_tdma;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s = %5ph case-%d",
-                        "PS TDMA",
-                        coex_dm->ps_tdma_para, ps_tdma_case);
-
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                        "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
-                        coex_dm->cur_dec_bt_pwr_lvl,
-                        coex_dm->cur_ignore_wlan_act);
+               seq_printf(m, "\n %-35s = %5ph case-%d",
+                          "PS TDMA",
+                          coex_dm->ps_tdma_para, ps_tdma_case);
+
+               seq_printf(m, "\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
+                          coex_dm->cur_dec_bt_pwr_lvl,
+                          coex_dm->cur_ignore_wlan_act);
        }
 
        /* Hw setting*/
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s", "============[Hw setting]============");
+       seq_printf(m, "\n %-35s", "============[Hw setting]============");
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal",
-                coex_dm->bt_rf0x1e_backup);
+       seq_printf(m, "\n %-35s = 0x%x", "RF-A, 0x1e initVal",
+                  coex_dm->bt_rf0x1e_backup);
 
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
        u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x ",
-                "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
-                u1tmp[0], u1tmp[1]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x ",
+                  "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
+                  u1tmp[0], u1tmp[1]);
 
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
        u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xc5b);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0x8db(ADC)/0xc5b[29:25](DAC)",
-                ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1));
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0x8db(ADC)/0xc5b[29:25](DAC)",
+                  ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1));
 
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
-                u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28));
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
+                  u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28));
 
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
        u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x974);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "0x40/ 0x4c[24:23]/ 0x974",
-                u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0x40/ 0x4c[24:23]/ 0x974",
+                  u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]);
 
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0x550(bcn ctrl)/0x522",
-                u4tmp[0], u1tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0x550(bcn ctrl)/0x522",
+                  u4tmp[0], u1tmp[0]);
 
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa0a);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "0xc50(DIG)/0xa0a(CCK-TH)",
-                u4tmp[0], u1tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "0xc50(DIG)/0xa0a(CCK-TH)",
+                  u4tmp[0], u1tmp[0]);
 
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
        u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                "OFDM-FA/ CCK-FA",
-                u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]);
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x",
+                  "OFDM-FA/ CCK-FA",
+                  u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]);
 
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
        u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
        u4tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                "0x6c0/0x6c4/0x6c8",
-                u4tmp[0], u4tmp[1], u4tmp[2]);
-
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                "0x770 (hi-pri Rx/Tx)",
-                coex_sta->high_priority_rx, coex_sta->high_priority_tx);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
+       seq_printf(m, "\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0x6c0/0x6c4/0x6c8",
+                  u4tmp[0], u4tmp[1], u4tmp[2]);
+
+       seq_printf(m, "\n %-35s = %d/ %d",
+                  "0x770 (hi-pri Rx/Tx)",
+                  coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+       seq_printf(m, "\n %-35s = %d/ %d",
                   "0x774(low-pri Rx/Tx)",
                   coex_sta->low_priority_rx, coex_sta->low_priority_tx);
 
        /* Tx mgnt queue hang or not, 0x41b should = 0xf, ex: 0xd ==>hang*/
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x41b);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
-                "0x41b (mgntQ hang chk == 0xf)",
-                u1tmp[0]);
+       seq_printf(m, "\n %-35s = 0x%x",
+                  "0x41b (mgntQ hang chk == 0xf)",
+                  u1tmp[0]);
 
-       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS, m);
 }
 
 void ex_btc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
index a839d55..ce3e58c 100644 (file)
@@ -226,7 +226,8 @@ ex_btc8821a2ant_periodical(
        );
 void
 ex_btc8821a2ant_display_coex_info(
-       struct btc_coexist *btcoexist
+       struct btc_coexist *btcoexist,
+       struct seq_file *m
        );
 void ex_btc8821a2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
 void ex_btc8821a2ant_pre_load_firmware(struct btc_coexist *btcoexist);
index b5e9877..5f3eda3 100644 (file)
@@ -653,6 +653,105 @@ static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
        return ret;
 }
 
+static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist,
+                                          struct seq_file *m)
+{
+}
+
+static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist,
+                                       struct seq_file *m)
+{
+}
+
+static void halbtc_display_wifi_status(struct btc_coexist *btcoexist,
+                                      struct seq_file *m)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       s32 wifi_rssi = 0, bt_hs_rssi = 0;
+       bool scan = false, link = false, roam = false, wifi_busy = false;
+       bool wifi_under_b_mode = false;
+       bool wifi_under_5g = false;
+       u32 wifi_bw = BTC_WIFI_BW_HT20;
+       u32 wifi_traffic_dir = BTC_WIFI_TRAFFIC_TX;
+       u32 wifi_freq = BTC_FREQ_2_4G;
+       u32 wifi_link_status = 0x0;
+       bool bt_hs_on = false, under_ips = false, under_lps = false;
+       bool low_power = false, dc_mode = false;
+       u8 wifi_chnl = 0, wifi_hs_chnl = 0, fw_ps_state;
+       u8 ap_num = 0;
+
+       wifi_link_status = halbtc_get_wifi_link_status(btcoexist);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d/ %d/ %d",
+                  "STA/vWifi/HS/p2pGo/p2pGc",
+                  ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
+                  ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
+                  ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
+                  ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
+                  ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+       btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifi_chnl);
+       btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+       seq_printf(m, "\n %-35s = %d / %d(%d)",
+                  "Dot11 channel / HsChnl(High Speed)",
+                  wifi_chnl, wifi_hs_chnl, bt_hs_on);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+       btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+       seq_printf(m, "\n %-35s = %d/ %d",
+                  "Wifi rssi/ HS rssi",
+                  wifi_rssi - 100, bt_hs_rssi - 100);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+       seq_printf(m, "\n %-35s = %d/ %d/ %d ",
+                  "Wifi link/ roam/ scan",
+                  link, roam, scan);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+                          &wifi_traffic_dir);
+       btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
+       wifi_freq = (wifi_under_5g ? BTC_FREQ_5G : BTC_FREQ_2_4G);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_B_MODE,
+                          &wifi_under_b_mode);
+
+       seq_printf(m, "\n %-35s = %s / %s/ %s/ AP=%d ",
+                  "Wifi freq/ bw/ traffic",
+                  gl_btc_wifi_freq_string[wifi_freq],
+                  ((wifi_under_b_mode) ? "11b" :
+                   gl_btc_wifi_bw_string[wifi_bw]),
+                  ((!wifi_busy) ? "idle" : ((BTC_WIFI_TRAFFIC_TX ==
+                                             wifi_traffic_dir) ? "uplink" :
+                                            "downlink")),
+                  ap_num);
+
+       /* power status  */
+       dc_mode = true; /*TODO*/
+       under_ips = rtlpriv->psc.inactive_pwrstate == ERFOFF ? 1 : 0;
+       under_lps = rtlpriv->psc.dot11_psmode == EACTIVE ? 0 : 1;
+       fw_ps_state = 0;
+       low_power = 0; /*TODO*/
+       seq_printf(m, "\n %-35s = %s%s%s%s",
+                  "Power Status",
+                  (dc_mode ? "DC mode" : "AC mode"),
+                  (under_ips ? ", IPS ON" : ""),
+                  (under_lps ? ", LPS ON" : ""),
+                  (low_power ? ", 32k" : ""));
+
+       seq_printf(m,
+                  "\n %-35s = %02x %02x %02x %02x %02x %02x (0x%x/0x%x)",
+                  "Power mode cmd(lps/rpwm)",
+                  btcoexist->pwr_mode_val[0], btcoexist->pwr_mode_val[1],
+                  btcoexist->pwr_mode_val[2], btcoexist->pwr_mode_val[3],
+                  btcoexist->pwr_mode_val[4], btcoexist->pwr_mode_val[5],
+                  btcoexist->bt_info.lps_val,
+                  btcoexist->bt_info.rpwm_val);
+}
+
 /************************************************************
  *             IO related function
  ************************************************************/
@@ -831,6 +930,26 @@ void halbtc_set_bt_reg(void *btc_context, u8 reg_type, u32 offset, u32 set_val)
        }
 }
 
+static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type,
+                                  struct seq_file *m)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+
+       switch (disp_type) {
+       case BTC_DBG_DISP_COEX_STATISTICS:
+               halbtc_display_coex_statistics(btcoexist, m);
+               break;
+       case BTC_DBG_DISP_BT_LINK_INFO:
+               halbtc_display_bt_link_info(btcoexist, m);
+               break;
+       case BTC_DBG_DISP_WIFI_STATUS:
+               halbtc_display_wifi_status(btcoexist, m);
+               break;
+       default:
+               break;
+       }
+}
+
 bool halbtc_under_ips(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv = btcoexist->adapter;
@@ -874,6 +993,7 @@ bool exhalbtc_initlize_variables(void)
        btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
 
        btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
+       btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
 
        btcoexist->btc_get = halbtc_get;
        btcoexist->btc_set = halbtc_set;
@@ -1513,7 +1633,8 @@ void exhalbtc_set_single_ant_path(u8 single_ant_path)
        gl_bt_coexist.board_info.single_ant_path = single_ant_path;
 }
 
-void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist,
+                                  struct seq_file *m)
 {
        if (!halbtc_is_bt_coexist_available(btcoexist))
                return;
@@ -1522,17 +1643,17 @@ void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
 
        if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) {
                if (btcoexist->board_info.btdm_ant_num == 2)
-                       ex_btc8821a2ant_display_coex_info(btcoexist);
+                       ex_btc8821a2ant_display_coex_info(btcoexist, m);
                else if (btcoexist->board_info.btdm_ant_num == 1)
-                       ex_btc8821a1ant_display_coex_info(btcoexist);
+                       ex_btc8821a1ant_display_coex_info(btcoexist, m);
        } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) {
                if (btcoexist->board_info.btdm_ant_num == 2)
-                       ex_btc8723b2ant_display_coex_info(btcoexist);
+                       ex_btc8723b2ant_display_coex_info(btcoexist, m);
                else if (btcoexist->board_info.btdm_ant_num == 1)
-                       ex_btc8723b1ant_display_coex_info(btcoexist);
+                       ex_btc8723b1ant_display_coex_info(btcoexist, m);
        } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) {
                if (btcoexist->board_info.btdm_ant_num == 2)
-                       ex_btc8192e2ant_display_coex_info(btcoexist);
+                       ex_btc8192e2ant_display_coex_info(btcoexist, m);
        }
 
        halbtc_normal_low_power(btcoexist);
index f9b87c1..ea12b9d 100644 (file)
@@ -152,7 +152,6 @@ struct btc_board_info {
        u8 btdm_ant_num;        /* ant number for btdm */
        u8 btdm_ant_pos;
        u8 single_ant_path; /* current used for 8723b only, 1=>s0,  0=>s1 */
-       bool bt_exist;
        bool tfbga_package;
 };
 
@@ -181,6 +180,12 @@ enum btc_wifi_role {
        BTC_ROLE_MAX
 };
 
+enum btc_wireless_freq {
+       BTC_FREQ_2_4G = 0x0,
+       BTC_FREQ_5G = 0x1,
+       BTC_FREQ_MAX
+};
+
 enum btc_wifi_bw_mode {
        BTC_WIFI_BW_LEGACY = 0x0,
        BTC_WIFI_BW_HT20 = 0x1,
@@ -355,6 +360,7 @@ enum btc_dbg_disp_type {
        BTC_DBG_DISP_BT_LINK_INFO = 0x1,
        BTC_DBG_DISP_BT_FW_VER = 0x2,
        BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
+       BTC_DBG_DISP_WIFI_STATUS = 0x04,
        BTC_DBG_DISP_MAX
 };
 
@@ -458,7 +464,8 @@ typedef     bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
 typedef void (*bfp_btc_set_bt_reg)(void *btc_context, u8 reg_type, u32 offset,
                                   u32 value);
 
-typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
+typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type,
+                                    struct seq_file *m);
 
 struct btc_bt_info {
        bool bt_disabled;
@@ -626,7 +633,8 @@ void exhalbtc_update_min_bt_rssi(s8 bt_rssi);
 void exhalbtc_set_bt_exist(bool bt_exist);
 void exhalbtc_set_chip_type(u8 chip_type);
 void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num);
-void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist,
+                                  struct seq_file *m);
 void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
                                  u8 *rssi_wifi, u8 *rssi_bt);
 void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
index 7d296a4..4d9e330 100644 (file)
@@ -52,8 +52,14 @@ static struct rtl_btc_ops rtl_btc_operation = {
        .btc_is_bt_ctrl_lps = rtl_btc_is_bt_ctrl_lps,
        .btc_is_bt_lps_on = rtl_btc_is_bt_lps_on,
        .btc_get_ampdu_cfg = rtl_btc_get_ampdu_cfg,
+       .btc_display_bt_coex_info = rtl_btc_display_bt_coex_info,
 };
 
+void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m)
+{
+       exhalbtc_display_bt_coex_info(&gl_bt_coexist, m);
+}
+
 void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len)
 {
        u8 safe_len;
index ac1253c..40f1ce8 100644 (file)
@@ -44,6 +44,7 @@ bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
 bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
 bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
 void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type);
+void rtl_btc_display_bt_coex_info(struct rtl_priv *rtlpriv, struct seq_file *m);
 void rtl_btc_record_pwr_mode(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
 u8   rtl_btc_get_lps_val(struct rtl_priv *rtlpriv);
 u8   rtl_btc_get_rpwm_val(struct rtl_priv *rtlpriv);
index 3cb8882..a78b828 100644 (file)
@@ -345,9 +345,9 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
        mutex_lock(&rtlpriv->locks.conf_mutex);
 
        /* Free beacon resources */
-       if ((vif->type == NL80211_IFTYPE_AP) ||
-           (vif->type == NL80211_IFTYPE_ADHOC) ||
-           (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+       if (vif->type == NL80211_IFTYPE_AP ||
+           vif->type == NL80211_IFTYPE_ADHOC ||
+           vif->type == NL80211_IFTYPE_MESH_POINT) {
                if (mac->beacon_enabled == 1) {
                        mac->beacon_enabled = 0;
                        rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -858,8 +858,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
         * here just used for linked scanning, & linked
         * and nolink check bssid is set in set network_type
         */
-       if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
-           (mac->link_state >= MAC80211_LINKED)) {
+       if (changed_flags & FIF_BCN_PRBRESP_PROMISC &&
+           mac->link_state >= MAC80211_LINKED) {
                if (mac->opmode != NL80211_IFTYPE_AP &&
                    mac->opmode != NL80211_IFTYPE_MESH_POINT) {
                        if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
@@ -1044,10 +1044,10 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 
        mutex_lock(&rtlpriv->locks.conf_mutex);
-       if ((vif->type == NL80211_IFTYPE_ADHOC) ||
-           (vif->type == NL80211_IFTYPE_AP) ||
-           (vif->type == NL80211_IFTYPE_MESH_POINT)) {
-               if ((changed & BSS_CHANGED_BEACON) ||
+       if (vif->type == NL80211_IFTYPE_ADHOC ||
+           vif->type == NL80211_IFTYPE_AP ||
+           vif->type == NL80211_IFTYPE_MESH_POINT) {
+               if (changed & BSS_CHANGED_BEACON ||
                    (changed & BSS_CHANGED_BEACON_ENABLED &&
                     bss_conf->enable_beacon)) {
                        if (mac->beacon_enabled == 0) {
@@ -1513,9 +1513,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                return -ENOSPC; /*User disabled HW-crypto */
        }
        /* To support IBSS, use sw-crypto for GTK */
-       if (((vif->type == NL80211_IFTYPE_ADHOC) ||
-           (vif->type == NL80211_IFTYPE_MESH_POINT)) &&
-          !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+       if ((vif->type == NL80211_IFTYPE_ADHOC ||
+            vif->type == NL80211_IFTYPE_MESH_POINT) &&
+           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
                return -ENOSPC;
        RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
                 "%s hardware based encryption for keyidx: %d, mac: %pM\n",
@@ -1588,7 +1588,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                        rtlpriv->cfg->ops->enable_hw_sec(hw);
                }
        } else {
-               if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) ||
+               if (!group_key || vif->type == NL80211_IFTYPE_ADHOC ||
                    rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
                        if (rtlpriv->sec.pairwise_enc_algorithm ==
                            NO_ENCRYPTION &&
@@ -1775,7 +1775,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
                                break;
                        case PWR_CMD_WRITE:
                                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
-                                       "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
+                                        "%s(): PWR_CMD_WRITE\n", __func__);
                                offset = GET_PWR_CFG_OFFSET(cfg_cmd);
 
                                /*Read the value from system register*/
index 38fef6d..d70385b 100644 (file)
  *****************************************************************************/
 
 #include "wifi.h"
+#include "cam.h"
 
 #include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_RTLWIFI_DEBUG
 void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
                    const char *fmt, ...)
 {
        if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
-                    (level <= rtlpriv->cfg->mod_params->debug_level))) {
+                    level <= rtlpriv->cfg->mod_params->debug_level)) {
                struct va_format vaf;
                va_list args;
 
@@ -51,7 +53,7 @@ void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
                    const char *fmt, ...)
 {
        if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
-                    (level <= rtlpriv->cfg->mod_params->debug_level))) {
+                    level <= rtlpriv->cfg->mod_params->debug_level)) {
                struct va_format vaf;
                va_list args;
 
@@ -81,4 +83,481 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
 }
 EXPORT_SYMBOL_GPL(_rtl_dbg_print_data);
 
+struct rtl_debugfs_priv {
+       struct rtl_priv *rtlpriv;
+       int (*cb_read)(struct seq_file *m, void *v);
+       ssize_t (*cb_write)(struct file *filp, const char __user *buffer,
+                           size_t count, loff_t *loff);
+       u32 cb_data;
+};
+
+static struct dentry *debugfs_topdir;
+
+static int rtl_debug_get_common(struct seq_file *m, void *v)
+{
+       struct rtl_debugfs_priv *debugfs_priv = m->private;
+
+       return debugfs_priv->cb_read(m, v);
+}
+
+static int dl_debug_open_common(struct inode *inode, struct file *file)
+{
+       return single_open(file, rtl_debug_get_common, inode->i_private);
+}
+
+static const struct file_operations file_ops_common = {
+       .open = dl_debug_open_common,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+static int rtl_debug_get_mac_page(struct seq_file *m, void *v)
+{
+       struct rtl_debugfs_priv *debugfs_priv = m->private;
+       struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+       u32 page = debugfs_priv->cb_data;
+       int i, n;
+       int max = 0xff;
+
+       for (n = 0; n <= max; ) {
+               seq_printf(m, "\n%8.8x  ", n + page);
+               for (i = 0; i < 4 && n <= max; i++, n += 4)
+                       seq_printf(m, "%8.8x    ",
+                                  rtl_read_dword(rtlpriv, (page | n)));
+       }
+       seq_puts(m, "\n");
+       return 0;
+}
+
+#define RTL_DEBUG_IMPL_MAC_SERIES(page, addr)                  \
+static struct rtl_debugfs_priv rtl_debug_priv_mac_ ##page = {  \
+       .cb_read = rtl_debug_get_mac_page,                      \
+       .cb_data = addr,                                        \
+}
+
+RTL_DEBUG_IMPL_MAC_SERIES(0, 0x0000);
+RTL_DEBUG_IMPL_MAC_SERIES(1, 0x0100);
+RTL_DEBUG_IMPL_MAC_SERIES(2, 0x0200);
+RTL_DEBUG_IMPL_MAC_SERIES(3, 0x0300);
+RTL_DEBUG_IMPL_MAC_SERIES(4, 0x0400);
+RTL_DEBUG_IMPL_MAC_SERIES(5, 0x0500);
+RTL_DEBUG_IMPL_MAC_SERIES(6, 0x0600);
+RTL_DEBUG_IMPL_MAC_SERIES(7, 0x0700);
+RTL_DEBUG_IMPL_MAC_SERIES(10, 0x1000);
+RTL_DEBUG_IMPL_MAC_SERIES(11, 0x1100);
+RTL_DEBUG_IMPL_MAC_SERIES(12, 0x1200);
+RTL_DEBUG_IMPL_MAC_SERIES(13, 0x1300);
+RTL_DEBUG_IMPL_MAC_SERIES(14, 0x1400);
+RTL_DEBUG_IMPL_MAC_SERIES(15, 0x1500);
+RTL_DEBUG_IMPL_MAC_SERIES(16, 0x1600);
+RTL_DEBUG_IMPL_MAC_SERIES(17, 0x1700);
+
+static int rtl_debug_get_bb_page(struct seq_file *m, void *v)
+{
+       struct rtl_debugfs_priv *debugfs_priv = m->private;
+       struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+       struct ieee80211_hw *hw = rtlpriv->hw;
+       u32 page = debugfs_priv->cb_data;
+       int i, n;
+       int max = 0xff;
+
+       for (n = 0; n <= max; ) {
+               seq_printf(m, "\n%8.8x  ", n + page);
+               for (i = 0; i < 4 && n <= max; i++, n += 4)
+                       seq_printf(m, "%8.8x    ",
+                                  rtl_get_bbreg(hw, (page | n), 0xffffffff));
+       }
+       seq_puts(m, "\n");
+       return 0;
+}
+
+#define RTL_DEBUG_IMPL_BB_SERIES(page, addr)                   \
+static struct rtl_debugfs_priv rtl_debug_priv_bb_ ##page = {   \
+       .cb_read = rtl_debug_get_bb_page,                       \
+       .cb_data = addr,                                        \
+}
+
+RTL_DEBUG_IMPL_BB_SERIES(8, 0x0800);
+RTL_DEBUG_IMPL_BB_SERIES(9, 0x0900);
+RTL_DEBUG_IMPL_BB_SERIES(a, 0x0a00);
+RTL_DEBUG_IMPL_BB_SERIES(b, 0x0b00);
+RTL_DEBUG_IMPL_BB_SERIES(c, 0x0c00);
+RTL_DEBUG_IMPL_BB_SERIES(d, 0x0d00);
+RTL_DEBUG_IMPL_BB_SERIES(e, 0x0e00);
+RTL_DEBUG_IMPL_BB_SERIES(f, 0x0f00);
+RTL_DEBUG_IMPL_BB_SERIES(18, 0x1800);
+RTL_DEBUG_IMPL_BB_SERIES(19, 0x1900);
+RTL_DEBUG_IMPL_BB_SERIES(1a, 0x1a00);
+RTL_DEBUG_IMPL_BB_SERIES(1b, 0x1b00);
+RTL_DEBUG_IMPL_BB_SERIES(1c, 0x1c00);
+RTL_DEBUG_IMPL_BB_SERIES(1d, 0x1d00);
+RTL_DEBUG_IMPL_BB_SERIES(1e, 0x1e00);
+RTL_DEBUG_IMPL_BB_SERIES(1f, 0x1f00);
+
+static int rtl_debug_get_reg_rf(struct seq_file *m, void *v)
+{
+       struct rtl_debugfs_priv *debugfs_priv = m->private;
+       struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+       struct ieee80211_hw *hw = rtlpriv->hw;
+       enum radio_path rfpath = debugfs_priv->cb_data;
+       int i, n;
+       int max = 0x40;
+
+       if (IS_HARDWARE_TYPE_8822B(rtlpriv))
+               max = 0xff;
+
+       seq_printf(m, "\nPATH(%d)", rfpath);
+
+       for (n = 0; n <= max; ) {
+               seq_printf(m, "\n%8.8x  ", n);
+               for (i = 0; i < 4 && n <= max; n += 1, i++)
+                       seq_printf(m, "%8.8x    ",
+                                  rtl_get_rfreg(hw, rfpath, n, 0xffffffff));
+       }
+       seq_puts(m, "\n");
+       return 0;
+}
+
+#define RTL_DEBUG_IMPL_RF_SERIES(page, addr)                   \
+static struct rtl_debugfs_priv rtl_debug_priv_rf_ ##page = {   \
+       .cb_read = rtl_debug_get_reg_rf,                        \
+       .cb_data = addr,                                        \
+}
+
+RTL_DEBUG_IMPL_RF_SERIES(a, RF90_PATH_A);
+RTL_DEBUG_IMPL_RF_SERIES(b, RF90_PATH_B);
+
+static int rtl_debug_get_cam_register(struct seq_file *m, void *v)
+{
+       struct rtl_debugfs_priv *debugfs_priv = m->private;
+       struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+       int start = debugfs_priv->cb_data;
+       u32 target_cmd = 0;
+       u32 target_val = 0;
+       u8 entry_i = 0;
+       u32 ulstatus;
+       int i = 100, j = 0;
+       int end = (start + 11 > TOTAL_CAM_ENTRY ? TOTAL_CAM_ENTRY : start + 11);
+
+       /* This dump the current register page */
+       seq_printf(m,
+                  "\n#################### SECURITY CAM (%d-%d) ##################\n",
+                  start, end - 1);
+
+       for (j = start; j < end; j++) {
+               seq_printf(m, "\nD:  %2x > ", j);
+               for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+                       /* polling bit, and No Write enable, and address  */
+                       target_cmd = entry_i + CAM_CONTENT_COUNT * j;
+                       target_cmd = target_cmd | BIT(31);
+
+                       /* Check polling bit is clear */
+                       while ((i--) >= 0) {
+                               ulstatus =
+                                   rtl_read_dword(rtlpriv,
+                                                  rtlpriv->cfg->maps[RWCAM]);
+                               if (ulstatus & BIT(31))
+                                       continue;
+                               else
+                                       break;
+                       }
+
+                       rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
+                                       target_cmd);
+                       target_val = rtl_read_dword(rtlpriv,
+                                                   rtlpriv->cfg->maps[RCAMO]);
+                       seq_printf(m, "%8.8x ", target_val);
+               }
+       }
+       seq_puts(m, "\n");
+       return 0;
+}
+
+#define RTL_DEBUG_IMPL_CAM_SERIES(page, addr)                  \
+static struct rtl_debugfs_priv rtl_debug_priv_cam_ ##page = {  \
+       .cb_read = rtl_debug_get_cam_register,                  \
+       .cb_data = addr,                                        \
+}
+
+RTL_DEBUG_IMPL_CAM_SERIES(1, 0);
+RTL_DEBUG_IMPL_CAM_SERIES(2, 11);
+RTL_DEBUG_IMPL_CAM_SERIES(3, 22);
+
+static int rtl_debug_get_btcoex(struct seq_file *m, void *v)
+{
+       struct rtl_debugfs_priv *debugfs_priv = m->private;
+       struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+
+       if (rtlpriv->cfg->ops->get_btc_status())
+               rtlpriv->btcoexist.btc_ops->btc_display_bt_coex_info(rtlpriv,
+                                                                    m);
+
+       seq_puts(m, "\n");
+
+       return 0;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_btcoex = {
+       .cb_read = rtl_debug_get_btcoex,
+       .cb_data = 0,
+};
+
+static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
+                                        const char __user *buffer,
+                                        size_t count, loff_t *loff)
+{
+       struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+       struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+       char tmp[32 + 1];
+       int tmp_len;
+       u32 addr, val, len;
+       int num;
+
+       if (count < 3)
+               return -EFAULT;
+
+       tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+       if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+               return count;
+
+       tmp[tmp_len] = '\0';
+
+       /* write BB/MAC register */
+       num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
+
+       if (num !=  3)
+               return count;
+
+       switch (len) {
+       case 1:
+               rtl_write_byte(rtlpriv, addr, (u8)val);
+               break;
+       case 2:
+               rtl_write_word(rtlpriv, addr, (u16)val);
+               break;
+       case 4:
+               rtl_write_dword(rtlpriv, addr, val);
+               break;
+       default:
+               /*printk("error write length=%d", len);*/
+               break;
+       }
+
+       return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_reg = {
+       .cb_write = rtl_debugfs_set_write_reg,
+};
+
+static ssize_t rtl_debugfs_set_write_h2c(struct file *filp,
+                                        const char __user *buffer,
+                                        size_t count, loff_t *loff)
+{
+       struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+       struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+       struct ieee80211_hw *hw = rtlpriv->hw;
+       char tmp[32 + 1];
+       int tmp_len;
+       u8 h2c_len, h2c_data_packed[8];
+       int h2c_data[8];        /* idx 0: cmd */
+       int i;
+
+       if (count < 3)
+               return -EFAULT;
+
+       tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+       if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+               return count;
+
+       tmp[tmp_len] = '\0';
+
+       h2c_len = sscanf(tmp, "%X %X %X %X %X %X %X %X",
+                        &h2c_data[0], &h2c_data[1],
+                        &h2c_data[2], &h2c_data[3],
+                        &h2c_data[4], &h2c_data[5],
+                        &h2c_data[6], &h2c_data[7]);
+
+       if (h2c_len <= 0)
+               return count;
+
+       for (i = 0; i < h2c_len; i++)
+               h2c_data_packed[i] = (u8)h2c_data[i];
+
+       rtlpriv->cfg->ops->fill_h2c_cmd(hw, h2c_data_packed[0],
+                                       h2c_len - 1,
+                                       &h2c_data_packed[1]);
+
+       return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_h2c = {
+       .cb_write = rtl_debugfs_set_write_h2c,
+};
+
+static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
+                                          const char __user *buffer,
+                                           size_t count, loff_t *loff)
+{
+       struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+       struct rtl_priv *rtlpriv = debugfs_priv->rtlpriv;
+       struct ieee80211_hw *hw = rtlpriv->hw;
+       char tmp[32 + 1];
+       int tmp_len;
+       int num;
+       int path;
+       u32 addr, bitmask, data;
+
+       if (count < 3)
+               return -EFAULT;
+
+       tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+
+       if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+               return count;
+
+       tmp[tmp_len] = '\0';
+
+       num = sscanf(tmp, "%X %X %X %X",
+                    &path, &addr, &bitmask, &data);
+
+       if (num != 4) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
+                        "Format is <path> <addr> <mask> <data>\n");
+               return count;
+       }
+
+       rtl_set_rfreg(hw, path, addr, bitmask, data);
+
+       return count;
+}
+
+static struct rtl_debugfs_priv rtl_debug_priv_write_rfreg = {
+       .cb_write = rtl_debugfs_set_write_rfreg,
+};
+
+static int rtl_debugfs_close(struct inode *inode, struct file *filp)
+{
+       return 0;
+}
+
+static ssize_t rtl_debugfs_common_write(struct file *filp,
+                                       const char __user *buffer,
+                                       size_t count, loff_t *loff)
+{
+       struct rtl_debugfs_priv *debugfs_priv = filp->private_data;
+
+       return debugfs_priv->cb_write(filp, buffer, count, loff);
+}
+
+static const struct file_operations file_ops_common_write = {
+       .owner = THIS_MODULE,
+       .write = rtl_debugfs_common_write,
+       .open = simple_open,
+       .release = rtl_debugfs_close,
+};
+
+#define RTL_DEBUGFS_ADD_CORE(name, mode, fopname)                         \
+       do {                                                               \
+               rtl_debug_priv_ ##name.rtlpriv = rtlpriv;                  \
+               if (!debugfs_create_file(#name, mode,                      \
+                                        parent, &rtl_debug_priv_ ##name,  \
+                                        &file_ops_ ##fopname))            \
+                       pr_err("Unable to initialize debugfs:%s/%s\n",     \
+                              rtlpriv->dbg.debugfs_name,                  \
+                              #name);                                     \
+       } while (0)
+
+#define RTL_DEBUGFS_ADD(name)                                             \
+               RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0444, common)
+#define RTL_DEBUGFS_ADD_W(name)                                                   \
+               RTL_DEBUGFS_ADD_CORE(name, S_IFREG | 0222, common_write)
+
+void rtl_debug_add_one(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct dentry *parent;
+
+       snprintf(rtlpriv->dbg.debugfs_name, 18, "%pMF", rtlefuse->dev_addr);
+
+       rtlpriv->dbg.debugfs_dir =
+               debugfs_create_dir(rtlpriv->dbg.debugfs_name, debugfs_topdir);
+       if (!rtlpriv->dbg.debugfs_dir) {
+               pr_err("Unable to init debugfs:/%s/%s\n", rtlpriv->cfg->name,
+                      rtlpriv->dbg.debugfs_name);
+               return;
+       }
+
+       parent = rtlpriv->dbg.debugfs_dir;
+
+       RTL_DEBUGFS_ADD(mac_0);
+       RTL_DEBUGFS_ADD(mac_1);
+       RTL_DEBUGFS_ADD(mac_2);
+       RTL_DEBUGFS_ADD(mac_3);
+       RTL_DEBUGFS_ADD(mac_4);
+       RTL_DEBUGFS_ADD(mac_5);
+       RTL_DEBUGFS_ADD(mac_6);
+       RTL_DEBUGFS_ADD(mac_7);
+       RTL_DEBUGFS_ADD(bb_8);
+       RTL_DEBUGFS_ADD(bb_9);
+       RTL_DEBUGFS_ADD(bb_a);
+       RTL_DEBUGFS_ADD(bb_b);
+       RTL_DEBUGFS_ADD(bb_c);
+       RTL_DEBUGFS_ADD(bb_d);
+       RTL_DEBUGFS_ADD(bb_e);
+       RTL_DEBUGFS_ADD(bb_f);
+       RTL_DEBUGFS_ADD(mac_10);
+       RTL_DEBUGFS_ADD(mac_11);
+       RTL_DEBUGFS_ADD(mac_12);
+       RTL_DEBUGFS_ADD(mac_13);
+       RTL_DEBUGFS_ADD(mac_14);
+       RTL_DEBUGFS_ADD(mac_15);
+       RTL_DEBUGFS_ADD(mac_16);
+       RTL_DEBUGFS_ADD(mac_17);
+       RTL_DEBUGFS_ADD(bb_18);
+       RTL_DEBUGFS_ADD(bb_19);
+       RTL_DEBUGFS_ADD(bb_1a);
+       RTL_DEBUGFS_ADD(bb_1b);
+       RTL_DEBUGFS_ADD(bb_1c);
+       RTL_DEBUGFS_ADD(bb_1d);
+       RTL_DEBUGFS_ADD(bb_1e);
+       RTL_DEBUGFS_ADD(bb_1f);
+       RTL_DEBUGFS_ADD(rf_a);
+       RTL_DEBUGFS_ADD(rf_b);
+
+       RTL_DEBUGFS_ADD(cam_1);
+       RTL_DEBUGFS_ADD(cam_2);
+       RTL_DEBUGFS_ADD(cam_3);
+
+       RTL_DEBUGFS_ADD(btcoex);
+
+       RTL_DEBUGFS_ADD_W(write_reg);
+       RTL_DEBUGFS_ADD_W(write_h2c);
+       RTL_DEBUGFS_ADD_W(write_rfreg);
+}
+EXPORT_SYMBOL_GPL(rtl_debug_add_one);
+
+void rtl_debug_remove_one(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       debugfs_remove_recursive(rtlpriv->dbg.debugfs_dir);
+       rtlpriv->dbg.debugfs_dir = NULL;
+}
+EXPORT_SYMBOL_GPL(rtl_debug_remove_one);
+
+void rtl_debugfs_add_topdir(void)
+{
+       debugfs_topdir = debugfs_create_dir("rtlwifi", NULL);
+}
+
+void rtl_debugfs_remove_topdir(void)
+{
+       debugfs_remove_recursive(debugfs_topdir);
+}
+
 #endif
index 9477180..ad6834a 100644 (file)
@@ -219,4 +219,16 @@ static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv,
 }
 
 #endif
+
+#ifdef CONFIG_RTLWIFI_DEBUG
+void rtl_debug_add_one(struct ieee80211_hw *hw);
+void rtl_debug_remove_one(struct ieee80211_hw *hw);
+void rtl_debugfs_add_topdir(void);
+void rtl_debugfs_remove_topdir(void);
+#else
+#define rtl_debug_add_one(hw)
+#define rtl_debug_remove_one(hw)
+#define rtl_debugfs_add_topdir()
+#define rtl_debugfs_remove_topdir()
+#endif
 #endif
index ef9acd4..35b50be 100644 (file)
@@ -257,11 +257,11 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
                            sizeof(u8), GFP_ATOMIC);
        if (!efuse_tbl)
                return;
-       efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC);
+       efuse_word = kcalloc(EFUSE_MAX_WORD_UNIT, sizeof(u16 *), GFP_ATOMIC);
        if (!efuse_word)
                goto out;
        for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
-               efuse_word[i] = kzalloc(efuse_max_section * sizeof(u16),
+               efuse_word[i] = kcalloc(efuse_max_section, sizeof(u16),
                                        GFP_ATOMIC);
                if (!efuse_word[i])
                        goto done;
index 4013394..c1833a5 100644 (file)
@@ -2309,6 +2309,9 @@ int rtl_pci_probe(struct pci_dev *pdev,
        }
        rtlpriv->mac80211.mac80211_registered = 1;
 
+       /* add for debug */
+       rtl_debug_add_one(hw);
+
        /*init rfkill */
        rtl_init_rfkill(hw);    /* Init PCI sw */
 
@@ -2357,6 +2360,9 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
        wait_for_completion(&rtlpriv->firmware_loading_complete);
        clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
 
+       /* remove from debug */
+       rtl_debug_remove_one(hw);
+
        /*ieee80211_unregister_hw will call ops_stop */
        if (rtlmac->mac80211_registered == 1) {
                ieee80211_unregister_hw(hw);
index 02811ed..d1cb7d4 100644 (file)
@@ -123,7 +123,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
                        if (sta && (sta->ht_cap.cap &
                                    IEEE80211_HT_CAP_SUP_WIDTH_20_40))
                                rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
-                       if (sta && (sta->vht_cap.vht_supported))
+                       if (sta && sta->vht_cap.vht_supported)
                                rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
                } else {
                        if (mac->bw_40)
@@ -135,8 +135,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
                if (sgi_20 || sgi_40 || sgi_80)
                        rate->flags |= IEEE80211_TX_RC_SHORT_GI;
                if (sta && sta->ht_cap.ht_supported &&
-                   ((wireless_mode == WIRELESS_MODE_N_5G) ||
-                    (wireless_mode == WIRELESS_MODE_N_24G)))
+                   (wireless_mode == WIRELESS_MODE_N_5G ||
+                    wireless_mode == WIRELESS_MODE_N_24G))
                        rate->flags |= IEEE80211_TX_RC_MCS;
                if (sta && sta->vht_cap.vht_supported &&
                    (wireless_mode == WIRELESS_MODE_AC_5G ||
@@ -216,8 +216,8 @@ static void rtl_tx_status(void *ppriv,
 
        if (sta) {
                /* Check if aggregation has to be enabled for this tid */
-               sta_entry = (struct rtl_sta_info *) sta->drv_priv;
-               if ((sta->ht_cap.ht_supported) &&
+               sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+               if (sta->ht_cap.ht_supported &&
                    !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
                        if (ieee80211_is_data_qos(fc)) {
                                u8 tid = rtl_get_tid(skb);
@@ -265,11 +265,9 @@ static void *rtl_rate_alloc_sta(void *ppriv,
        struct rtl_priv *rtlpriv = ppriv;
        struct rtl_rate_priv *rate_priv;
 
-       rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
-       if (!rate_priv) {
-               pr_err("Unable to allocate private rc structure\n");
+       rate_priv = kzalloc(sizeof(*rate_priv), gfp);
+       if (!rate_priv)
                return NULL;
-       }
 
        rtlpriv->rate_priv = rate_priv;
 
index a2eca66..6387451 100644 (file)
@@ -141,6 +141,8 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
                return 1;
 
        pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+       rtlhal->fw_version = le16_to_cpu(pfwheader->version);
+       rtlhal->fw_subversion = pfwheader->subversion;
        pfwdata = rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
        RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
index efa7e12..521039c 100644 (file)
@@ -196,10 +196,12 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
        enum version_8723e version = rtlhal->version;
        int max_page;
 
-       if (!rtlhal->pfirmware)
+       if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
                return 1;
 
        pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+       rtlhal->fw_version = le16_to_cpu(pfwheader->version);
+       rtlhal->fw_subversion = pfwheader->subversion;
        pfwdata = rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
 
index e2b1479..0b1c543 100644 (file)
@@ -2381,6 +2381,12 @@ struct rtl_works {
        struct work_struct fill_h2c_cmd;
 };
 
+struct rtl_debug {
+       /* add for debug */
+       struct dentry *debugfs_dir;
+       char debugfs_name[20];
+};
+
 #define MIMO_PS_STATIC                 0
 #define MIMO_PS_DYNAMIC                        1
 #define MIMO_PS_NOLIMIT                        3
@@ -2575,6 +2581,8 @@ struct rtl_btc_ops {
        bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
        void (*btc_special_packet_notify)(struct rtl_priv *rtlpriv,
                                          u8 pkt_type);
+       void (*btc_display_bt_coex_info)(struct rtl_priv *rtlpriv,
+                                        struct seq_file *m);
        void (*btc_record_pwr_mode)(struct rtl_priv *rtlpriv, u8 *buf, u8 len);
        u8   (*btc_get_lps_val)(struct rtl_priv *rtlpriv);
        u8   (*btc_get_rpwm_val)(struct rtl_priv *rtlpriv);
@@ -2649,6 +2657,7 @@ struct rtl_priv {
        /* c2hcmd list for kthread level access */
        struct list_head c2hcmd_list;
 
+       struct rtl_debug dbg;
        int max_fw_size;
 
        /*
index 6d02c66..037defd 100644 (file)
@@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
                WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
 
                enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
-               wl1251_acx_arp_ip_filter(wl, enable, addr);
-
+               ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
                if (ret < 0)
                        goto out_sleep;
        }
index e949e33..c586bcd 100644 (file)
@@ -211,12 +211,12 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
        return ret;
 }
 
-static int btt_log_read_pair(struct arena_info *arena, u32 lane,
-                       struct log_entry *ent)
+static int btt_log_group_read(struct arena_info *arena, u32 lane,
+                       struct log_group *log)
 {
        return arena_read_bytes(arena,
-                       arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
-                       2 * LOG_ENT_SIZE, 0);
+                       arena->logoff + (lane * LOG_GRP_SIZE), log,
+                       LOG_GRP_SIZE, 0);
 }
 
 static struct dentry *debugfs_root;
@@ -256,6 +256,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
        debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
        debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
        debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
+       debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
+       debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
 }
 
 static void btt_debugfs_init(struct btt *btt)
@@ -274,6 +276,11 @@ static void btt_debugfs_init(struct btt *btt)
        }
 }
 
+static u32 log_seq(struct log_group *log, int log_idx)
+{
+       return le32_to_cpu(log->ent[log_idx].seq);
+}
+
 /*
  * This function accepts two log entries, and uses the
  * sequence number to find the 'older' entry.
@@ -283,8 +290,10 @@ static void btt_debugfs_init(struct btt *btt)
  *
  * TODO The logic feels a bit kludge-y. make it better..
  */
-static int btt_log_get_old(struct log_entry *ent)
+static int btt_log_get_old(struct arena_info *a, struct log_group *log)
 {
+       int idx0 = a->log_index[0];
+       int idx1 = a->log_index[1];
        int old;
 
        /*
@@ -292,23 +301,23 @@ static int btt_log_get_old(struct log_entry *ent)
         * the next time, the following logic works out to put this
         * (next) entry into [1]
         */
-       if (ent[0].seq == 0) {
-               ent[0].seq = cpu_to_le32(1);
+       if (log_seq(log, idx0) == 0) {
+               log->ent[idx0].seq = cpu_to_le32(1);
                return 0;
        }
 
-       if (ent[0].seq == ent[1].seq)
+       if (log_seq(log, idx0) == log_seq(log, idx1))
                return -EINVAL;
-       if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
+       if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
                return -EINVAL;
 
-       if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
-               if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
+       if (log_seq(log, idx0) < log_seq(log, idx1)) {
+               if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
                        old = 0;
                else
                        old = 1;
        } else {
-               if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
+               if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
                        old = 1;
                else
                        old = 0;
@@ -328,17 +337,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
 {
        int ret;
        int old_ent, ret_ent;
-       struct log_entry log[2];
+       struct log_group log;
 
-       ret = btt_log_read_pair(arena, lane, log);
+       ret = btt_log_group_read(arena, lane, &log);
        if (ret)
                return -EIO;
 
-       old_ent = btt_log_get_old(log);
+       old_ent = btt_log_get_old(arena, &log);
        if (old_ent < 0 || old_ent > 1) {
                dev_err(to_dev(arena),
                                "log corruption (%d): lane %d seq [%d, %d]\n",
-                       old_ent, lane, log[0].seq, log[1].seq);
+                               old_ent, lane, log.ent[arena->log_index[0]].seq,
+                               log.ent[arena->log_index[1]].seq);
                /* TODO set error state? */
                return -EIO;
        }
@@ -346,7 +356,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
        ret_ent = (old_flag ? old_ent : (1 - old_ent));
 
        if (ent != NULL)
-               memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);
+               memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
 
        return ret_ent;
 }
@@ -360,17 +370,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
                        u32 sub, struct log_entry *ent, unsigned long flags)
 {
        int ret;
-       /*
-        * Ignore the padding in log_entry for calculating log_half.
-        * The entry is 'committed' when we write the sequence number,
-        * and we want to ensure that that is the last thing written.
-        * We don't bother writing the padding as that would be extra
-        * media wear and write amplification
-        */
-       unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
-       u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
+       u32 group_slot = arena->log_index[sub];
+       unsigned int log_half = LOG_ENT_SIZE / 2;
        void *src = ent;
+       u64 ns_off;
 
+       ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
+               (group_slot * LOG_ENT_SIZE);
        /* split the 16B write into atomic, durable halves */
        ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
        if (ret)
@@ -453,7 +459,7 @@ static int btt_log_init(struct arena_info *arena)
 {
        size_t logsize = arena->info2off - arena->logoff;
        size_t chunk_size = SZ_4K, offset = 0;
-       struct log_entry log;
+       struct log_entry ent;
        void *zerobuf;
        int ret;
        u32 i;
@@ -485,11 +491,11 @@ static int btt_log_init(struct arena_info *arena)
        }
 
        for (i = 0; i < arena->nfree; i++) {
-               log.lba = cpu_to_le32(i);
-               log.old_map = cpu_to_le32(arena->external_nlba + i);
-               log.new_map = cpu_to_le32(arena->external_nlba + i);
-               log.seq = cpu_to_le32(LOG_SEQ_INIT);
-               ret = __btt_log_write(arena, i, 0, &log, 0);
+               ent.lba = cpu_to_le32(i);
+               ent.old_map = cpu_to_le32(arena->external_nlba + i);
+               ent.new_map = cpu_to_le32(arena->external_nlba + i);
+               ent.seq = cpu_to_le32(LOG_SEQ_INIT);
+               ret = __btt_log_write(arena, i, 0, &ent, 0);
                if (ret)
                        goto free;
        }
@@ -594,6 +600,123 @@ static int btt_freelist_init(struct arena_info *arena)
        return 0;
 }
 
+static bool ent_is_padding(struct log_entry *ent)
+{
+       return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
+               && (ent->seq == 0);
+}
+
+/*
+ * Detecting valid log indices: We read a log group (see the comments in btt.h
+ * for a description of a 'log_group' and its 'slots'), and iterate over its
+ * four slots. We expect that a padding slot will be all-zeroes, and use this
+ * to detect a padding slot vs. an actual entry.
+ *
+ * If a log_group is in the initial state, i.e. hasn't been used since the
+ * creation of this BTT layout, it will have three of the four slots with
+ * zeroes. We skip over these log_groups for the detection of log_index. If
+ * all log_groups are in the initial state (i.e. the BTT has never been
+ * written to), it is safe to assume the 'new format' of log entries in slots
+ * (0, 1).
+ */
+static int log_set_indices(struct arena_info *arena)
+{
+       bool idx_set = false, initial_state = true;
+       int ret, log_index[2] = {-1, -1};
+       u32 i, j, next_idx = 0;
+       struct log_group log;
+       u32 pad_count = 0;
+
+       for (i = 0; i < arena->nfree; i++) {
+               ret = btt_log_group_read(arena, i, &log);
+               if (ret < 0)
+                       return ret;
+
+               for (j = 0; j < 4; j++) {
+                       if (!idx_set) {
+                               if (ent_is_padding(&log.ent[j])) {
+                                       pad_count++;
+                                       continue;
+                               } else {
+                                       /* Skip if index has been recorded */
+                                       if ((next_idx == 1) &&
+                                               (j == log_index[0]))
+                                               continue;
+                                       /* valid entry, record index */
+                                       log_index[next_idx] = j;
+                                       next_idx++;
+                               }
+                               if (next_idx == 2) {
+                                       /* two valid entries found */
+                                       idx_set = true;
+                               } else if (next_idx > 2) {
+                                       /* too many valid indices */
+                                       return -ENXIO;
+                               }
+                       } else {
+                               /*
+                                * once the indices have been set, just verify
+                                * that all subsequent log groups are either in
+                                * their initial state or follow the same
+                                * indices.
+                                */
+                               if (j == log_index[0]) {
+                                       /* entry must be 'valid' */
+                                       if (ent_is_padding(&log.ent[j]))
+                                               return -ENXIO;
+                               } else if (j == log_index[1]) {
+                                       ;
+                                       /*
+                                        * log_index[1] can be padding if the
+                                        * lane never got used and it is still
+                                        * in the initial state (three 'padding'
+                                        * entries)
+                                        */
+                               } else {
+                                       /* entry must be invalid (padding) */
+                                       if (!ent_is_padding(&log.ent[j]))
+                                               return -ENXIO;
+                               }
+                       }
+               }
+               /*
+                * If any of the log_groups have more than one valid,
+                * non-padding entry, then we are no longer in the
+                * initial_state
+                */
+               if (pad_count < 3)
+                       initial_state = false;
+               pad_count = 0;
+       }
+
+       if (!initial_state && !idx_set)
+               return -ENXIO;
+
+       /*
+        * If all the entries in the log were in the initial state,
+        * assume new padding scheme
+        */
+       if (initial_state)
+               log_index[1] = 1;
+
+       /*
+        * Only allow the known permutations of log/padding indices,
+        * i.e. (0, 1), and (0, 2)
+        */
+       if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
+               ; /* known index possibilities */
+       else {
+               dev_err(to_dev(arena), "Found an unknown padding scheme\n");
+               return -ENXIO;
+       }
+
+       arena->log_index[0] = log_index[0];
+       arena->log_index[1] = log_index[1];
+       dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
+       dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
+       return 0;
+}
+
 static int btt_rtt_init(struct arena_info *arena)
 {
        arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
@@ -650,8 +773,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
        available -= 2 * BTT_PG_SIZE;
 
        /* The log takes a fixed amount of space based on nfree */
-       logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
-                               BTT_PG_SIZE);
+       logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
        available -= logsize;
 
        /* Calculate optimal split between map and data area */
@@ -668,6 +790,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
        arena->mapoff = arena->dataoff + datasize;
        arena->logoff = arena->mapoff + mapsize;
        arena->info2off = arena->logoff + logsize;
+
+       /* Default log indices are (0,1) */
+       arena->log_index[0] = 0;
+       arena->log_index[1] = 1;
        return arena;
 }
 
@@ -758,6 +884,13 @@ static int discover_arenas(struct btt *btt)
                arena->external_lba_start = cur_nlba;
                parse_arena_meta(arena, super, cur_off);
 
+               ret = log_set_indices(arena);
+               if (ret) {
+                       dev_err(to_dev(arena),
+                               "Unable to deduce log/padding indices\n");
+                       goto out;
+               }
+
                mutex_init(&arena->err_lock);
                ret = btt_freelist_init(arena);
                if (ret)
index 578c205..db3cb6d 100644 (file)
@@ -27,6 +27,7 @@
 #define MAP_ERR_MASK (1 << MAP_ERR_SHIFT)
 #define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT)))
 #define MAP_ENT_NORMAL 0xC0000000
+#define LOG_GRP_SIZE sizeof(struct log_group)
 #define LOG_ENT_SIZE sizeof(struct log_entry)
 #define ARENA_MIN_SIZE (1UL << 24)     /* 16 MB */
 #define ARENA_MAX_SIZE (1ULL << 39)    /* 512 GB */
@@ -50,12 +51,52 @@ enum btt_init_state {
        INIT_READY
 };
 
+/*
+ * A log group represents one log 'lane', and consists of four log entries.
+ * Two of the four entries are valid entries, and the remaining two are
+ * padding. Due to an old bug in the padding location, we need to perform a
+ * test to determine the padding scheme being used, and use that scheme
+ * thereafter.
+ *
+ * In kernels prior to 4.15, 'log group' would have actual log entries at
+ * indices (0, 2) and padding at indices (1, 3), where as the correct/updated
+ * format has log entries at indices (0, 1) and padding at indices (2, 3).
+ *
+ * Old (pre 4.15) format:
+ * +-----------------+-----------------+
+ * |      ent[0]     |      ent[1]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq |       pad       |
+ * +-----------------------------------+
+ * |      ent[2]     |      ent[3]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq |       pad       |
+ * +-----------------+-----------------+
+ *
+ * New format:
+ * +-----------------+-----------------+
+ * |      ent[0]     |      ent[1]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq | lba/old/new/seq |
+ * +-----------------------------------+
+ * |      ent[2]     |      ent[3]     |
+ * |       16B       |       16B       |
+ * |       pad       |       pad       |
+ * +-----------------+-----------------+
+ *
+ * We detect during start-up which format is in use, and set
+ * arena->log_index[(0, 1)] with the detected format.
+ */
+
 struct log_entry {
        __le32 lba;
        __le32 old_map;
        __le32 new_map;
        __le32 seq;
-       __le64 padding[2];
+};
+
+struct log_group {
+       struct log_entry ent[4];
 };
 
 struct btt_sb {
@@ -125,6 +166,8 @@ struct aligned_lock {
  * @list:              List head for list of arenas
  * @debugfs_dir:       Debugfs dentry
  * @flags:             Arena flags - may signify error states.
+ * @err_lock:          Mutex for synchronizing error clearing.
+ * @log_index:         Indices of the valid log entries in a log_group
  *
  * arena_info is a per-arena handle. Once an arena is narrowed down for an
  * IO, this struct is passed around for the duration of the IO.
@@ -157,6 +200,7 @@ struct arena_info {
        /* Arena flags */
        u32 flags;
        struct mutex err_lock;
+       int log_index[2];
 };
 
 /**
@@ -176,6 +220,7 @@ struct arena_info {
  * @init_lock:         Mutex used for the BTT initialization
  * @init_state:                Flag describing the initialization state for the BTT
  * @num_arenas:                Number of arenas in the BTT instance
+ * @phys_bb:           Pointer to the namespace's badblocks structure
  */
 struct btt {
        struct gendisk *btt_disk;
index 65cc171..2adada1 100644 (file)
@@ -364,9 +364,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
        u64 checksum, offset;
-       unsigned long align;
        enum nd_pfn_mode mode;
        struct nd_namespace_io *nsio;
+       unsigned long align, start_pad;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);
@@ -410,6 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 
        align = le32_to_cpu(pfn_sb->align);
        offset = le64_to_cpu(pfn_sb->dataoff);
+       start_pad = le32_to_cpu(pfn_sb->start_pad);
        if (align == 0)
                align = 1UL << ilog2(offset);
        mode = le32_to_cpu(pfn_sb->mode);
@@ -468,7 +469,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
                return -EBUSY;
        }
 
-       if ((align && !IS_ALIGNED(offset, align))
+       if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
                        || !IS_ALIGNED(offset, PAGE_SIZE)) {
                dev_err(&nd_pfn->dev,
                                "bad offset: %#llx dax disabled align: %#lx\n",
@@ -582,6 +583,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
        return altmap;
 }
 
+static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
+{
+       return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
+                       ALIGN_DOWN(phys, nd_pfn->align));
+}
+
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
        u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
@@ -637,13 +644,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        start = nsio->res.start;
        size = PHYS_SECTION_ALIGN_UP(start + size) - start;
        if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-                               IORES_DESC_NONE) == REGION_MIXED) {
+                               IORES_DESC_NONE) == REGION_MIXED
+                       || !IS_ALIGNED(start + resource_size(&nsio->res),
+                               nd_pfn->align)) {
                size = resource_size(&nsio->res);
-               end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+               end_trunc = start + size - phys_pmem_align_down(nd_pfn,
+                               start + size);
        }
 
        if (start_pad + end_trunc)
-               dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+               dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
                                dev_name(&ndns->dev), start_pad + end_trunc);
 
        /*
index f837d66..1e46e60 100644 (file)
@@ -1287,7 +1287,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
        BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
                        NVME_DSM_MAX_RANGES);
 
-       queue->limits.discard_alignment = size;
+       queue->limits.discard_alignment = 0;
        queue->limits.discard_granularity = size;
 
        blk_queue_max_discard_sectors(queue, UINT_MAX);
@@ -1705,7 +1705,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
-       if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+       if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+           is_power_of_2(ctrl->max_hw_sectors))
                blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
        blk_queue_virt_boundary(q, ctrl->page_size - 1);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
@@ -2869,7 +2870,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
        nvme_set_queue_limits(ctrl, ns->queue);
-       nvme_setup_streams_ns(ctrl, ns);
 
        id = nvme_identify_ns(ctrl, nsid);
        if (!id)
@@ -2880,6 +2880,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        if (nvme_init_ns_head(ns, nsid, id, &new))
                goto out_free_id;
+       nvme_setup_streams_ns(ctrl, ns);
        
 #ifdef CONFIG_NVME_MULTIPATH
        /*
@@ -2965,8 +2966,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                return;
 
        if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-               if (blk_get_integrity(ns->disk))
-                       blk_integrity_unregister(ns->disk);
                nvme_mpath_remove_disk_links(ns);
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvme_ns_id_attr_group);
@@ -2974,6 +2973,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                        nvme_nvm_unregister_sysfs(ns);
                del_gendisk(ns->disk);
                blk_cleanup_queue(ns->queue);
+               if (blk_get_integrity(ns->disk))
+                       blk_integrity_unregister(ns->disk);
        }
 
        mutex_lock(&ns->ctrl->subsys->lock);
index 0a8af4d..794e66e 100644 (file)
@@ -3221,7 +3221,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
                /* initiate nvme ctrl ref counting teardown */
                nvme_uninit_ctrl(&ctrl->ctrl);
-               nvme_put_ctrl(&ctrl->ctrl);
 
                /* Remove core ctrl ref. */
                nvme_put_ctrl(&ctrl->ctrl);
index f4c7329..1b9ef35 100644 (file)
@@ -77,9 +77,10 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
        if (of_property_read_bool(child, "broken-turn-around"))
                mdio->phy_ignore_ta_mask |= 1 << addr;
 
-       of_property_read_u32(child, "reset-delay-us", &phy->mdio.reset_delay);
-       of_property_read_u32(child, "reset-post-delay-us",
-                            &phy->mdio.reset_post_delay);
+       of_property_read_u32(child, "reset-assert-us",
+                            &phy->mdio.reset_assert_delay);
+       of_property_read_u32(child, "reset-deassert-us",
+                            &phy->mdio.reset_deassert_delay);
 
        /* Associate the OF node with the device structure so it
         * can be looked up later */
index a25fed5..41b740a 100644 (file)
@@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
        iounmap(base_addr);
 }
 
+
+/*
+ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
+ * seems rushed, so that many built-in components simply don't work.
+ * The following quirks disable the serial AUX port and the built-in ATI RV100
+ * Radeon 7000 graphics card which both don't have any external connectors and
+ * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as
+ * such makes those machines the only PARISC machines on which we can't use
+ * ttyS0 as boot console.
+ */
+static void quirk_diva_ati_card(struct pci_dev *dev)
+{
+       if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+           dev->subsystem_device != 0x1292)
+               return;
+
+       dev_info(&dev->dev, "Hiding Diva built-in ATI card");
+       dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
+       quirk_diva_ati_card);
+
+static void quirk_diva_aux_disable(struct pci_dev *dev)
+{
+       if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+           dev->subsystem_device != 0x1291)
+               return;
+
+       dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
+       dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+       quirk_diva_aux_disable);
index 945099d..14fd865 100644 (file)
@@ -1012,7 +1012,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
        if (pci_has_legacy_pm_support(pci_dev))
                return pci_legacy_resume_early(dev);
 
-       pci_update_current_state(pci_dev, PCI_D0);
+       /*
+        * pci_restore_state() requires the device to be in D0 (because of MSI
+        * restoration among other things), so force it into D0 in case the
+        * driver's "freeze" callbacks put it into a low-power state directly.
+        */
+       pci_set_power_state(pci_dev, PCI_D0);
        pci_restore_state(pci_dev);
 
        if (drv && drv->pm && drv->pm->thaw_noirq)
index bdedb63..4471fd9 100644 (file)
@@ -1620,6 +1620,22 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                        clear_bit(i, chip->irq.valid_mask);
        }
 
+       /*
+        * The same set of machines in chv_no_valid_mask[] have incorrectly
+        * configured GPIOs that generate spurious interrupts so we use
+        * this same list to apply another quirk for them.
+        *
+        * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953.
+        */
+       if (!need_valid_mask) {
+               /*
+                * Mask all interrupts the community is able to generate
+                * but leave the ones that can only generate GPEs unmasked.
+                */
+               chv_writel(GENMASK(31, pctrl->community->nirqs),
+                          pctrl->regs + CHV_INTMASK);
+       }
+
        /* Clear all interrupts */
        chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
 
index a782a20..c7e484f 100644 (file)
@@ -91,9 +91,6 @@ config QETH_L3
          To compile as a module choose M. The module name is qeth_l3.
          If unsure, choose Y.
 
-config QETH_IPV6
-       def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
-
 config CCWGROUP
        tristate
        default (LCS || CTCM || QETH)
index 92ae84a..0ee8f33 100644 (file)
@@ -756,18 +756,14 @@ lcs_get_lancmd(struct lcs_card *card, int count)
 static void
 lcs_get_reply(struct lcs_reply *reply)
 {
-       WARN_ON(atomic_read(&reply->refcnt) <= 0);
-       atomic_inc(&reply->refcnt);
+       refcount_inc(&reply->refcnt);
 }
 
 static void
 lcs_put_reply(struct lcs_reply *reply)
 {
-        WARN_ON(atomic_read(&reply->refcnt) <= 0);
-        if (atomic_dec_and_test(&reply->refcnt)) {
+       if (refcount_dec_and_test(&reply->refcnt))
                kfree(reply);
-       }
-
 }
 
 static struct lcs_reply *
@@ -780,7 +776,7 @@ lcs_alloc_reply(struct lcs_cmd *cmd)
        reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
        if (!reply)
                return NULL;
-       atomic_set(&reply->refcnt,1);
+       refcount_set(&reply->refcnt, 1);
        reply->sequence_no = cmd->sequence_no;
        reply->received = 0;
        reply->rc = 0;
index fbc8b90..bd52caa 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
+#include <linux/refcount.h>
 #include <asm/ccwdev.h>
 
 #define LCS_DBF_TEXT(level, name, text) \
@@ -271,7 +272,7 @@ struct lcs_buffer {
 struct lcs_reply {
        struct list_head list;
        __u16 sequence_no;
-       atomic_t refcnt;
+       refcount_t refcnt;
        /* Callback for completion notification. */
        void (*callback)(struct lcs_card *, struct lcs_cmd *);
        wait_queue_head_t wait_q;
index badf42a..db42107 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/ethtool.h>
 #include <linux/hashtable.h>
 #include <linux/ip.h>
+#include <linux/refcount.h>
 
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
@@ -296,8 +297,23 @@ struct qeth_hdr_layer3 {
        __u8  ext_flags;
        __u16 vlan_id;
        __u16 frame_offset;
-       __u8  dest_addr[16];
-} __attribute__ ((packed));
+       union {
+               /* TX: */
+               u8 ipv6_addr[16];
+               struct ipv4 {
+                       u8 res[12];
+                       u32 addr;
+               } ipv4;
+               /* RX: */
+               struct rx {
+                       u8 res1[2];
+                       u8 src_mac[6];
+                       u8 res2[4];
+                       u16 vlan_id;
+                       u8 res3[2];
+               } rx;
+       } next_hop;
+};
 
 struct qeth_hdr_layer2 {
        __u8 id;
@@ -504,12 +520,6 @@ struct qeth_qdio_info {
        int default_out_queue;
 };
 
-#define QETH_ETH_MAC_V4      0x0100 /* like v4 */
-#define QETH_ETH_MAC_V6      0x3333 /* like v6 */
-/* tr mc mac is longer, but that will be enough to detect mc frames */
-#define QETH_TR_MAC_NC       0xc000 /* non-canonical */
-#define QETH_TR_MAC_C        0x0300 /* canonical */
-
 /**
  * buffer stuff for read channel
  */
@@ -632,7 +642,7 @@ struct qeth_reply {
        int rc;
        void *param;
        struct qeth_card *card;
-       atomic_t refcnt;
+       refcount_t refcnt;
 };
 
 struct qeth_card_blkt {
@@ -846,14 +856,16 @@ static inline int qeth_get_micros(void)
 
 static inline int qeth_get_ip_version(struct sk_buff *skb)
 {
-       __be16 *p = &((struct ethhdr *)skb->data)->h_proto;
+       struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
+       __be16 prot = veth->h_vlan_proto;
+
+       if (prot == htons(ETH_P_8021Q))
+               prot = veth->h_vlan_encapsulated_proto;
 
-       if (be16_to_cpu(*p) == ETH_P_8021Q)
-               p += 2;
-       switch (be16_to_cpu(*p)) {
-       case ETH_P_IPV6:
+       switch (prot) {
+       case htons(ETH_P_IPV6):
                return 6;
-       case ETH_P_IP:
+       case htons(ETH_P_IP):
                return 4;
        default:
                return 0;
index 6c81520..6abd3bc 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/diag.h>
 #include <asm/cio.h>
 #include <asm/ccwdev.h>
+#include <asm/cpcmd.h>
 
 #include "qeth_core.h"
 
@@ -564,7 +565,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 
        reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
        if (reply) {
-               atomic_set(&reply->refcnt, 1);
+               refcount_set(&reply->refcnt, 1);
                atomic_set(&reply->received, 0);
                reply->card = card;
        }
@@ -573,14 +574,12 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 
 static void qeth_get_reply(struct qeth_reply *reply)
 {
-       WARN_ON(atomic_read(&reply->refcnt) <= 0);
-       atomic_inc(&reply->refcnt);
+       refcount_inc(&reply->refcnt);
 }
 
 static void qeth_put_reply(struct qeth_reply *reply)
 {
-       WARN_ON(atomic_read(&reply->refcnt) <= 0);
-       if (atomic_dec_and_test(&reply->refcnt))
+       if (refcount_dec_and_test(&reply->refcnt))
                kfree(reply);
 }
 
@@ -1717,23 +1716,87 @@ static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
                               (prcd[0x11] == _ascebc['M']));
 }
 
+static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
+{
+       enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+       struct diag26c_vnic_resp *response = NULL;
+       struct diag26c_vnic_req *request = NULL;
+       struct ccw_dev_id id;
+       char userid[80];
+       int rc = 0;
+
+       QETH_DBF_TEXT(SETUP, 2, "vmlayer");
+
+       cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
+       if (rc)
+               goto out;
+
+       request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
+       response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
+       if (!request || !response) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       ccw_device_get_id(CARD_RDEV(card), &id);
+       request->resp_buf_len = sizeof(*response);
+       request->resp_version = DIAG26C_VERSION6_VM65918;
+       request->req_format = DIAG26C_VNIC_INFO;
+       ASCEBC(userid, 8);
+       memcpy(&request->sys_name, userid, 8);
+       request->devno = id.devno;
+
+       QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+       rc = diag26c(request, response, DIAG26C_PORT_VNIC);
+       QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+       if (rc)
+               goto out;
+       QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
+
+       if (request->resp_buf_len < sizeof(*response) ||
+           response->version != request->resp_version) {
+               rc = -EIO;
+               goto out;
+       }
+
+       if (response->protocol == VNIC_INFO_PROT_L2)
+               disc = QETH_DISCIPLINE_LAYER2;
+       else if (response->protocol == VNIC_INFO_PROT_L3)
+               disc = QETH_DISCIPLINE_LAYER3;
+
+out:
+       kfree(response);
+       kfree(request);
+       if (rc)
+               QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
+       return disc;
+}
+
 /* Determine whether the device requires a specific layer discipline */
 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
 {
+       enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+
        if (card->info.type == QETH_CARD_TYPE_OSM ||
-           card->info.type == QETH_CARD_TYPE_OSN) {
+           card->info.type == QETH_CARD_TYPE_OSN)
+               disc = QETH_DISCIPLINE_LAYER2;
+       else if (card->info.guestlan)
+               disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
+                               QETH_DISCIPLINE_LAYER3 :
+                               qeth_vm_detect_layer(card);
+
+       switch (disc) {
+       case QETH_DISCIPLINE_LAYER2:
                QETH_DBF_TEXT(SETUP, 3, "force l2");
-               return QETH_DISCIPLINE_LAYER2;
-       }
-
-       /* virtual HiperSocket is L3 only: */
-       if (card->info.guestlan && card->info.type == QETH_CARD_TYPE_IQD) {
+               break;
+       case QETH_DISCIPLINE_LAYER3:
                QETH_DBF_TEXT(SETUP, 3, "force l3");
-               return QETH_DISCIPLINE_LAYER3;
+               break;
+       default:
+               QETH_DBF_TEXT(SETUP, 3, "force no");
        }
 
-       QETH_DBF_TEXT(SETUP, 3, "force no");
-       return QETH_DISCIPLINE_UNDETERMINED;
+       return disc;
 }
 
 static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
@@ -4218,9 +4281,8 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
        cmd = (struct qeth_ipa_cmd *) data;
        if (!card->options.layer2 ||
            !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
-               memcpy(card->dev->dev_addr,
-                      &cmd->data.setadapterparms.data.change_addr.addr,
-                      OSA_ADDR_LEN);
+               ether_addr_copy(card->dev->dev_addr,
+                               cmd->data.setadapterparms.data.change_addr.addr);
                card->info.mac_bits |= QETH_LAYER2_MAC_READ;
        }
        qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
@@ -4242,9 +4304,9 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
                return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
-       cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
-       memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
-              card->dev->dev_addr, OSA_ADDR_LEN);
+       cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
+       ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
+                       card->dev->dev_addr);
        rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
                               NULL);
        return rc;
@@ -4789,9 +4851,12 @@ int qeth_vm_request_mac(struct qeth_card *card)
        request->op_code = DIAG26C_GET_MAC;
        request->devno = id.devno;
 
+       QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
        rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
+       QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
        if (rc)
                goto out;
+       QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
 
        if (request->resp_buf_len < sizeof(*response) ||
            response->version != request->resp_version) {
@@ -5386,6 +5451,13 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_poll);
 
+static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+{
+       if (!cmd->hdr.return_code)
+               cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+       return cmd->hdr.return_code;
+}
+
 int qeth_setassparms_cb(struct qeth_card *card,
                        struct qeth_reply *reply, unsigned long data)
 {
@@ -6242,7 +6314,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
                                (struct qeth_checksum_cmd *)reply->param;
 
        QETH_CARD_TEXT(card, 4, "chkdoccb");
-       if (cmd->hdr.return_code)
+       if (qeth_setassparms_inspect_rc(cmd))
                return 0;
 
        memset(chksum_cb, 0, sizeof(*chksum_cb));
index ff6877f..619f897 100644 (file)
@@ -10,6 +10,7 @@
 #define __QETH_CORE_MPC_H__
 
 #include <asm/qeth.h>
+#include <uapi/linux/if_ether.h>
 
 #define IPA_PDU_HEADER_SIZE    0x40
 #define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
@@ -25,7 +26,6 @@ extern unsigned char IPA_PDU_HEADER[];
 #define QETH_SEQ_NO_LENGTH     4
 #define QETH_MPC_TOKEN_LENGTH  4
 #define QETH_MCL_LENGTH                4
-#define OSA_ADDR_LEN           6
 
 #define QETH_TIMEOUT           (10 * HZ)
 #define QETH_IPA_TIMEOUT       (45 * HZ)
@@ -416,12 +416,11 @@ struct qeth_query_cmds_supp {
 } __attribute__ ((packed));
 
 struct qeth_change_addr {
-       __u32 cmd;
-       __u32 addr_size;
-       __u32 no_macs;
-       __u8 addr[OSA_ADDR_LEN];
-} __attribute__ ((packed));
-
+       u32 cmd;
+       u32 addr_size;
+       u32 no_macs;
+       u8 addr[ETH_ALEN];
+};
 
 struct qeth_snmp_cmd {
        __u8  token[16];
index 09b1c4e..f213005 100644 (file)
@@ -22,8 +22,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
 bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
 
 struct qeth_mac {
-       u8 mac_addr[OSA_ADDR_LEN];
-       u8 is_uc:1;
+       u8 mac_addr[ETH_ALEN];
        u8 disp_flag:2;
        struct hlist_node hnode;
 };
index 5863ea1..7f23644 100644 (file)
@@ -109,8 +109,8 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
        if (!iob)
                return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
-       memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+       cmd->data.setdelmac.mac_length = ETH_ALEN;
+       ether_addr_copy(cmd->data.setdelmac.mac, mac);
        return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob,
                                           NULL, NULL));
 }
@@ -123,7 +123,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
        rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
        if (rc == 0) {
                card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-               memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+               ether_addr_copy(card->dev->dev_addr, mac);
                dev_info(&card->gdev->dev,
                        "MAC address %pM successfully registered on device %s\n",
                        card->dev->dev_addr, card->dev->name);
@@ -156,54 +156,37 @@ static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
        return rc;
 }
 
-static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 {
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+                                       IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
        int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Sgmac");
-       rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC);
+       QETH_CARD_TEXT(card, 2, "L2Wmac");
+       rc = qeth_l2_send_setdelmac(card, mac, cmd);
        if (rc == -EEXIST)
-               QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
-                       mac, QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
+                                mac, QETH_CARD_IFNAME(card));
        else if (rc)
-               QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
-                       mac, QETH_CARD_IFNAME(card), rc);
+               QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
+                                mac, QETH_CARD_IFNAME(card), rc);
        return rc;
 }
 
-static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
 {
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+                                       IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
        int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Dgmac");
-       rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC);
+       QETH_CARD_TEXT(card, 2, "L2Rmac");
+       rc = qeth_l2_send_setdelmac(card, mac, cmd);
        if (rc)
-               QETH_DBF_MESSAGE(2,
-                       "Could not delete group MAC %pM on %s: %d\n",
-                       mac, QETH_CARD_IFNAME(card), rc);
+               QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
+                                mac, QETH_CARD_IFNAME(card), rc);
        return rc;
 }
 
-static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
-       if (mac->is_uc) {
-               return qeth_l2_send_setdelmac(card, mac->mac_addr,
-                                               IPA_CMD_SETVMAC);
-       } else {
-               return qeth_l2_send_setgroupmac(card, mac->mac_addr);
-       }
-}
-
-static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
-       if (mac->is_uc) {
-               return qeth_l2_send_setdelmac(card, mac->mac_addr,
-                                               IPA_CMD_DELVMAC);
-       } else {
-               return qeth_l2_send_delgroupmac(card, mac->mac_addr);
-       }
-}
-
 static void qeth_l2_del_all_macs(struct qeth_card *card)
 {
        struct qeth_mac *mac;
@@ -549,7 +532,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                QETH_CARD_TEXT(card, 3, "setmcTYP");
                return -EOPNOTSUPP;
        }
-       QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
+       QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "setmcREC");
                return -ERESTARTSYS;
@@ -597,27 +580,23 @@ static void qeth_promisc_to_bridge(struct qeth_card *card)
  * only if there is not in the hash table storage already
  *
 */
-static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha,
-                           u8 is_uc)
+static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
 {
        u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
        struct qeth_mac *mac;
 
        hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) {
-               if (is_uc == mac->is_uc &&
-                   !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
+               if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
                        mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
                        return;
                }
        }
 
        mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
-
        if (!mac)
                return;
 
-       memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN);
-       mac->is_uc = is_uc;
+       ether_addr_copy(mac->mac_addr, ha->addr);
        mac->disp_flag = QETH_DISP_ADDR_ADD;
 
        hash_add(card->mac_htable, &mac->hnode, mac_hash);
@@ -643,26 +622,29 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
        spin_lock_bh(&card->mclock);
 
        netdev_for_each_mc_addr(ha, dev)
-               qeth_l2_add_mac(card, ha, 0);
-
+               qeth_l2_add_mac(card, ha);
        netdev_for_each_uc_addr(ha, dev)
-               qeth_l2_add_mac(card, ha, 1);
+               qeth_l2_add_mac(card, ha);
 
        hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
-               if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
-                       qeth_l2_remove_mac(card, mac);
+               switch (mac->disp_flag) {
+               case QETH_DISP_ADDR_DELETE:
+                       qeth_l2_remove_mac(card, mac->mac_addr);
                        hash_del(&mac->hnode);
                        kfree(mac);
-
-               } else if (mac->disp_flag == QETH_DISP_ADDR_ADD) {
-                       rc = qeth_l2_write_mac(card, mac);
+                       break;
+               case QETH_DISP_ADDR_ADD:
+                       rc = qeth_l2_write_mac(card, mac->mac_addr);
                        if (rc) {
                                hash_del(&mac->hnode);
                                kfree(mac);
-                       } else
-                               mac->disp_flag = QETH_DISP_ADDR_DELETE;
-               } else
+                               break;
+                       }
+                       /* fall through */
+               default:
+                       /* for next call to set_rx_mode(): */
                        mac->disp_flag = QETH_DISP_ADDR_DELETE;
+               }
        }
 
        spin_unlock_bh(&card->mclock);
index e583383..bdd45f4 100644 (file)
@@ -29,7 +29,7 @@ struct qeth_ipaddr {
         */
        int  ref_counter;
        enum qeth_prot_versions proto;
-       unsigned char mac[OSA_ADDR_LEN];
+       unsigned char mac[ETH_ALEN];
        union {
                struct {
                        unsigned int addr;
@@ -69,19 +69,20 @@ struct qeth_ipato_entry {
 extern const struct attribute_group *qeth_l3_attr_groups[];
 
 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
-int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
 int qeth_l3_create_device_attributes(struct device *);
 void qeth_l3_remove_device_attributes(struct device *);
 int qeth_l3_setrouting_v4(struct qeth_card *);
 int qeth_l3_setrouting_v6(struct qeth_card *);
 int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
-void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions,
-                       u8 *, int);
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+                           enum qeth_prot_versions proto, u8 *addr,
+                           int mask_bits);
 int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
-void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+                    const u8 *addr);
 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
-void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
-                       const u8 *);
+int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+                    const u8 *addr);
 void qeth_l3_update_ipato(struct qeth_card *card);
 struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
 int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
index ef0961e..b0c888e 100644 (file)
 #include <linux/kernel.h>
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
+#include <linux/in.h>
 #include <linux/ipv6.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 #include <linux/slab.h>
+#include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/skbuff.h>
 
 #include <net/ip.h>
 #include <net/arp.h>
 #include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_checksum.h>
 #include <net/iucv/af_iucv.h>
 
 static int qeth_l3_set_offline(struct ccwgroup_device *);
 static int qeth_l3_stop(struct net_device *);
-static void qeth_l3_set_multicast_list(struct net_device *);
+static void qeth_l3_set_rx_mode(struct net_device *dev);
 static int qeth_l3_register_addr_entry(struct qeth_card *,
                struct qeth_ipaddr *);
 static int qeth_l3_deregister_addr_entry(struct qeth_card *,
                struct qeth_ipaddr *);
 
-static int qeth_l3_isxdigit(char *buf)
-{
-       while (*buf) {
-               if (!isxdigit(*buf++))
-                       return 0;
-       }
-       return 1;
-}
-
 static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
 {
        sprintf(buf, "%pI4", addr);
 }
 
-static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
-{
-       int count = 0, rc = 0;
-       unsigned int in[4];
-       char c;
-
-       rc = sscanf(buf, "%u.%u.%u.%u%c",
-                   &in[0], &in[1], &in[2], &in[3], &c);
-       if (rc != 4 && (rc != 5 || c != '\n'))
-               return -EINVAL;
-       for (count = 0; count < 4; count++) {
-               if (in[count] > 255)
-                       return -EINVAL;
-               addr[count] = in[count];
-       }
-       return 0;
-}
-
 static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
 {
        sprintf(buf, "%pI6", addr);
 }
 
-static int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
-{
-       const char *end, *end_tmp, *start;
-       __u16 *in;
-       char num[5];
-       int num2, cnt, out, found, save_cnt;
-       unsigned short in_tmp[8] = {0, };
-
-       cnt = out = found = save_cnt = num2 = 0;
-       end = start = buf;
-       in = (__u16 *) addr;
-       memset(in, 0, 16);
-       while (*end) {
-               end = strchr(start, ':');
-               if (end == NULL) {
-                       end = buf + strlen(buf);
-                       end_tmp = strchr(start, '\n');
-                       if (end_tmp != NULL)
-                               end = end_tmp;
-                       out = 1;
-               }
-               if ((end - start)) {
-                       memset(num, 0, 5);
-                       if ((end - start) > 4)
-                               return -EINVAL;
-                       memcpy(num, start, end - start);
-                       if (!qeth_l3_isxdigit(num))
-                               return -EINVAL;
-                       sscanf(start, "%x", &num2);
-                       if (found)
-                               in_tmp[save_cnt++] = num2;
-                       else
-                               in[cnt++] = num2;
-                       if (out)
-                               break;
-               } else {
-                       if (found)
-                               return -EINVAL;
-                       found = 1;
-               }
-               start = ++end;
-       }
-       if (cnt + save_cnt > 8)
-               return -EINVAL;
-       cnt = 7;
-       while (save_cnt)
-               in[cnt--] = in_tmp[--save_cnt];
-       return 0;
-}
-
 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
                                char *buf)
 {
@@ -139,17 +67,6 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
                qeth_l3_ipaddr6_to_string(addr, buf);
 }
 
-int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
-                               __u8 *addr)
-{
-       if (proto == QETH_PROT_IPV4)
-               return qeth_l3_string_to_ipaddr4(buf, addr);
-       else if (proto == QETH_PROT_IPV6)
-               return qeth_l3_string_to_ipaddr6(buf, addr);
-       else
-               return -EINVAL;
-}
-
 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
 {
        int i, j;
@@ -207,8 +124,8 @@ inline int
 qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
 {
        return addr1->proto == addr2->proto &&
-               !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u))  &&
-               !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
+              !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
+              ether_addr_equal_64bits(addr1->mac, addr2->mac);
 }
 
 static struct qeth_ipaddr *
@@ -446,7 +363,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
        if (!iob)
                return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
+       ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
        if (addr->proto == QETH_PROT_IPV6)
                memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
                       sizeof(struct in6_addr));
@@ -582,7 +499,6 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
        int rc = 0;
 
        QETH_CARD_TEXT(card, 3, "setrtg6");
-#ifdef CONFIG_QETH_IPV6
 
        if (!qeth_is_supported(card, IPA_IPV6))
                return 0;
@@ -599,7 +515,6 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
                        " on %s. Type set to 'no router'.\n", rc,
                        QETH_CARD_IFNAME(card));
        }
-#endif
        return rc;
 }
 
@@ -673,10 +588,12 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
        return rc;
 }
 
-void qeth_l3_del_ipato_entry(struct qeth_card *card,
-               enum qeth_prot_versions proto, u8 *addr, int mask_bits)
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+                           enum qeth_prot_versions proto, u8 *addr,
+                           int mask_bits)
 {
        struct qeth_ipato_entry *ipatoe, *tmp;
+       int rc = -ENOENT;
 
        QETH_CARD_TEXT(card, 2, "delipato");
 
@@ -691,10 +608,12 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
                        list_del(&ipatoe->entry);
                        qeth_l3_update_ipato(card);
                        kfree(ipatoe);
+                       rc = 0;
                }
        }
 
        spin_unlock_bh(&card->ip_lock);
+       return rc;
 }
 
 /*
@@ -704,7 +623,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
              const u8 *addr)
 {
        struct qeth_ipaddr *ipaddr;
-       int rc = 0;
+       int rc;
 
        ipaddr = qeth_l3_get_addr_buffer(proto);
        if (ipaddr) {
@@ -728,7 +647,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
        if (qeth_l3_ip_from_hash(card, ipaddr))
                rc = -EEXIST;
        else
-               qeth_l3_add_ip(card, ipaddr);
+               rc = qeth_l3_add_ip(card, ipaddr);
 
        spin_unlock_bh(&card->ip_lock);
 
@@ -737,10 +656,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
        return rc;
 }
 
-void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
-             const u8 *addr)
+int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+                    const u8 *addr)
 {
        struct qeth_ipaddr *ipaddr;
+       int rc;
 
        ipaddr = qeth_l3_get_addr_buffer(proto);
        if (ipaddr) {
@@ -755,13 +675,14 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
                }
                ipaddr->type = QETH_IP_TYPE_VIPA;
        } else
-               return;
+               return -ENOMEM;
 
        spin_lock_bh(&card->ip_lock);
-       qeth_l3_delete_ip(card, ipaddr);
+       rc = qeth_l3_delete_ip(card, ipaddr);
        spin_unlock_bh(&card->ip_lock);
 
        kfree(ipaddr);
+       return rc;
 }
 
 /*
@@ -771,7 +692,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
              const u8 *addr)
 {
        struct qeth_ipaddr *ipaddr;
-       int rc = 0;
+       int rc;
 
        ipaddr = qeth_l3_get_addr_buffer(proto);
        if (ipaddr) {
@@ -796,7 +717,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
        if (qeth_l3_ip_from_hash(card, ipaddr))
                rc = -EEXIST;
        else
-               qeth_l3_add_ip(card, ipaddr);
+               rc = qeth_l3_add_ip(card, ipaddr);
 
        spin_unlock_bh(&card->ip_lock);
 
@@ -805,10 +726,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
        return rc;
 }
 
-void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
-                       const u8 *addr)
+int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+                    const u8 *addr)
 {
        struct qeth_ipaddr *ipaddr;
+       int rc;
 
        ipaddr = qeth_l3_get_addr_buffer(proto);
        if (ipaddr) {
@@ -823,13 +745,14 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
                }
                ipaddr->type = QETH_IP_TYPE_RXIP;
        } else
-               return;
+               return -ENOMEM;
 
        spin_lock_bh(&card->ip_lock);
-       qeth_l3_delete_ip(card, ipaddr);
+       rc = qeth_l3_delete_ip(card, ipaddr);
        spin_unlock_bh(&card->ip_lock);
 
        kfree(ipaddr);
+       return rc;
 }
 
 static int qeth_l3_register_addr_entry(struct qeth_card *card,
@@ -896,27 +819,6 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
        return rc;
 }
 
-static u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
-{
-       if (cast_type == RTN_MULTICAST)
-               return QETH_CAST_MULTICAST;
-       if (cast_type == RTN_BROADCAST)
-               return QETH_CAST_BROADCAST;
-       return QETH_CAST_UNICAST;
-}
-
-static u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
-{
-       u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
-       if (cast_type == RTN_MULTICAST)
-               return ct | QETH_CAST_MULTICAST;
-       if (cast_type == RTN_ANYCAST)
-               return ct | QETH_CAST_ANYCAST;
-       if (cast_type == RTN_BROADCAST)
-               return ct | QETH_CAST_BROADCAST;
-       return ct | QETH_CAST_UNICAST;
-}
-
 static int qeth_l3_setadapter_parms(struct qeth_card *card)
 {
        int rc = 0;
@@ -933,7 +835,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
        return rc;
 }
 
-#ifdef CONFIG_QETH_IPV6
 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
                enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
 {
@@ -949,7 +850,6 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
                                   qeth_setassparms_cb, NULL);
        return rc;
 }
-#endif
 
 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
 {
@@ -1045,7 +945,6 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
        return rc;
 }
 
-#ifdef CONFIG_QETH_IPV6
 static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
 {
        int rc;
@@ -1091,12 +990,9 @@ out:
        dev_info(&card->gdev->dev, "IPV6 enabled\n");
        return 0;
 }
-#endif
 
 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
 {
-       int rc = 0;
-
        QETH_CARD_TEXT(card, 3, "strtipv6");
 
        if (!qeth_is_supported(card, IPA_IPV6)) {
@@ -1104,10 +1000,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
                        "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
                return 0;
        }
-#ifdef CONFIG_QETH_IPV6
-       rc = qeth_l3_softsetup_ipv6(card);
-#endif
-       return rc ;
+       return qeth_l3_softsetup_ipv6(card);
 }
 
 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
@@ -1179,8 +1072,8 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
 
        cmd = (struct qeth_ipa_cmd *) data;
        if (cmd->hdr.return_code == 0)
-               memcpy(card->dev->dev_addr,
-                       cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
+               ether_addr_copy(card->dev->dev_addr,
+                               cmd->data.create_destroy_addr.unique_id);
        else
                eth_random_addr(card->dev->dev_addr);
 
@@ -1328,81 +1221,22 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
        return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
 }
 
-static void qeth_l3_get_mac_for_ipm(__be32 ipm, char *mac)
-{
-       ip_eth_mc_map(ipm, mac);
-}
-
-static void qeth_l3_mark_all_mc_to_be_deleted(struct qeth_card *card)
-{
-       struct qeth_ipaddr *addr;
-       int i;
-
-       hash_for_each(card->ip_mc_htable, i, addr, hnode)
-               addr->disp_flag = QETH_DISP_ADDR_DELETE;
-
-}
-
-static void qeth_l3_add_all_new_mc(struct qeth_card *card)
-{
-       struct qeth_ipaddr *addr;
-       struct hlist_node *tmp;
-       int i;
-       int rc;
-
-       hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
-               if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
-                       rc = qeth_l3_register_addr_entry(card, addr);
-                       if (!rc || (rc == IPA_RC_LAN_OFFLINE))
-                               addr->ref_counter = 1;
-                       else {
-                               hash_del(&addr->hnode);
-                               kfree(addr);
-                       }
-               }
-       }
-
-}
-
-static void qeth_l3_delete_nonused_mc(struct qeth_card *card)
-{
-       struct qeth_ipaddr *addr;
-       struct hlist_node *tmp;
-       int i;
-       int rc;
-
-       hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
-               if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
-                       rc = qeth_l3_deregister_addr_entry(card, addr);
-                       if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) {
-                               hash_del(&addr->hnode);
-                               kfree(addr);
-                       }
-               }
-       }
-
-}
-
-
 static void
 qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 {
        struct ip_mc_list *im4;
        struct qeth_ipaddr *tmp, *ipm;
-       char buf[MAX_ADDR_LEN];
 
        QETH_CARD_TEXT(card, 4, "addmc");
 
        tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
-               if (!tmp)
-                       return;
+       if (!tmp)
+               return;
 
        for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
             im4 = rcu_dereference(im4->next_rcu)) {
-               qeth_l3_get_mac_for_ipm(im4->multiaddr, buf);
-
+               ip_eth_mc_map(im4->multiaddr, tmp->mac);
                tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
-               memcpy(tmp->mac, buf, sizeof(tmp->mac));
                tmp->is_multicast = 1;
 
                ipm = qeth_l3_ip_from_hash(card, tmp);
@@ -1412,7 +1246,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
                        ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
                        if (!ipm)
                                continue;
-                       memcpy(ipm->mac, buf, sizeof(tmp->mac));
+                       ether_addr_copy(ipm->mac, tmp->mac);
                        ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
                        ipm->is_multicast = 1;
                        ipm->disp_flag = QETH_DISP_ADDR_ADD;
@@ -1466,25 +1300,21 @@ unlock:
        rcu_read_unlock();
 }
 
-#ifdef CONFIG_QETH_IPV6
-static void
-qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
+static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
+                                   struct inet6_dev *in6_dev)
 {
        struct qeth_ipaddr *ipm;
        struct ifmcaddr6 *im6;
        struct qeth_ipaddr *tmp;
-       char buf[MAX_ADDR_LEN];
 
        QETH_CARD_TEXT(card, 4, "addmc6");
 
        tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
-               if (!tmp)
-                       return;
+       if (!tmp)
+               return;
 
        for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
-               ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
-
-               memcpy(tmp->mac, buf, sizeof(tmp->mac));
+               ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
                memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
                       sizeof(struct in6_addr));
                tmp->is_multicast = 1;
@@ -1499,7 +1329,7 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
                if (!ipm)
                        continue;
 
-               memcpy(ipm->mac, buf, OSA_ADDR_LEN);
+               ether_addr_copy(ipm->mac, tmp->mac);
                memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
                       sizeof(struct in6_addr));
                ipm->is_multicast = 1;
@@ -1560,7 +1390,6 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
        rcu_read_unlock();
        in6_dev_put(in6_dev);
 }
-#endif /* CONFIG_QETH_IPV6 */
 
 static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
                        unsigned short vid)
@@ -1600,9 +1429,8 @@ out:
 }
 
 static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
-                       unsigned short vid)
+                                        unsigned short vid)
 {
-#ifdef CONFIG_QETH_IPV6
        struct inet6_dev *in6_dev;
        struct inet6_ifaddr *ifa;
        struct qeth_ipaddr *addr;
@@ -1637,7 +1465,6 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
        kfree(addr);
 out:
        in6_dev_put(in6_dev);
-#endif /* CONFIG_QETH_IPV6 */
 }
 
 static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
@@ -1672,44 +1499,31 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
        /* unregister IP addresses of vlan device */
        qeth_l3_free_vlan_addresses(card, vid);
        clear_bit(vid, card->active_vlans);
-       qeth_l3_set_multicast_list(card->dev);
+       qeth_l3_set_rx_mode(dev);
        return 0;
 }
 
 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
                                struct qeth_hdr *hdr)
 {
-       __u16 prot;
-       struct iphdr *ip_hdr;
-       unsigned char tg_addr[MAX_ADDR_LEN];
-
        if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
-               prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
-                             ETH_P_IP;
+               u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+                                                                ETH_P_IP;
+               unsigned char tg_addr[ETH_ALEN];
+
+               skb_reset_network_header(skb);
                switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
                case QETH_CAST_MULTICAST:
-                       switch (prot) {
-#ifdef CONFIG_QETH_IPV6
-                       case ETH_P_IPV6:
-                               ndisc_mc_map((struct in6_addr *)
-                                    skb->data + 24,
-                                    tg_addr, card->dev, 0);
-                               break;
-#endif
-                       case ETH_P_IP:
-                               ip_hdr = (struct iphdr *)skb->data;
-                               ip_eth_mc_map(ip_hdr->daddr, tg_addr);
-                               break;
-                       default:
-                               memcpy(tg_addr, card->dev->broadcast,
-                                       card->dev->addr_len);
-                       }
+                       if (prot == ETH_P_IP)
+                               ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
+                       else
+                               ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
+
                        card->stats.multicast++;
                        skb->pkt_type = PACKET_MULTICAST;
                        break;
                case QETH_CAST_BROADCAST:
-                       memcpy(tg_addr, card->dev->broadcast,
-                               card->dev->addr_len);
+                       ether_addr_copy(tg_addr, card->dev->broadcast);
                        card->stats.multicast++;
                        skb->pkt_type = PACKET_BROADCAST;
                        break;
@@ -1721,12 +1535,11 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
                                skb->pkt_type = PACKET_OTHERHOST;
                        else
                                skb->pkt_type = PACKET_HOST;
-                       memcpy(tg_addr, card->dev->dev_addr,
-                               card->dev->addr_len);
+                       ether_addr_copy(tg_addr, card->dev->dev_addr);
                }
                if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
                        card->dev->header_ops->create(skb, card->dev, prot,
-                               tg_addr, &hdr->hdr.l3.dest_addr[2],
+                               tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
                                card->dev->addr_len);
                else
                        card->dev->header_ops->create(skb, card->dev, prot,
@@ -1741,7 +1554,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
                                      QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
                u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
                                hdr->hdr.l3.vlan_id :
-                               *((u16 *)&hdr->hdr.l3.dest_addr[12]);
+                               hdr->hdr.l3.next_hop.rx.vlan_id;
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
        }
 
@@ -1949,26 +1762,46 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
        }
 }
 
-static void qeth_l3_set_multicast_list(struct net_device *dev)
+static void qeth_l3_set_rx_mode(struct net_device *dev)
 {
        struct qeth_card *card = dev->ml_priv;
+       struct qeth_ipaddr *addr;
+       struct hlist_node *tmp;
+       int i, rc;
 
        QETH_CARD_TEXT(card, 3, "setmulti");
        if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
            (card->state != CARD_STATE_UP))
                return;
        if (!card->options.sniffer) {
-
                spin_lock_bh(&card->mclock);
 
-               qeth_l3_mark_all_mc_to_be_deleted(card);
-
                qeth_l3_add_multicast_ipv4(card);
-#ifdef CONFIG_QETH_IPV6
                qeth_l3_add_multicast_ipv6(card);
-#endif
-               qeth_l3_delete_nonused_mc(card);
-               qeth_l3_add_all_new_mc(card);
+
+               hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+                       switch (addr->disp_flag) {
+                       case QETH_DISP_ADDR_DELETE:
+                               rc = qeth_l3_deregister_addr_entry(card, addr);
+                               if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) {
+                                       hash_del(&addr->hnode);
+                                       kfree(addr);
+                               }
+                               break;
+                       case QETH_DISP_ADDR_ADD:
+                               rc = qeth_l3_register_addr_entry(card, addr);
+                               if (rc && rc != IPA_RC_LAN_OFFLINE) {
+                                       hash_del(&addr->hnode);
+                                       kfree(addr);
+                                       break;
+                               }
+                               addr->ref_counter = 1;
+                               /* fall through */
+                       default:
+                               /* for next call to set_rx_mode(): */
+                               addr->disp_flag = QETH_DISP_ADDR_DELETE;
+                       }
+               }
 
                spin_unlock_bh(&card->mclock);
 
@@ -2237,12 +2070,10 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
                        rc = -EFAULT;
                goto free_and_out;
        }
-#ifdef CONFIG_QETH_IPV6
        if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
                /* fails in case of GuestLAN QDIO mode */
                qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
        }
-#endif
        if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
                QETH_CARD_TEXT(card, 4, "qactf");
                rc = -EFAULT;
@@ -2422,9 +2253,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        return rc;
 }
 
-static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_l3_get_cast_type(struct sk_buff *skb)
 {
-       int cast_type = RTN_UNSPEC;
        struct neighbour *n = NULL;
        struct dst_entry *dst;
 
@@ -2433,48 +2263,34 @@ static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
        if (dst)
                n = dst_neigh_lookup_skb(dst, skb);
        if (n) {
-               cast_type = n->type;
+               int cast_type = n->type;
+
                rcu_read_unlock();
                neigh_release(n);
                if ((cast_type == RTN_BROADCAST) ||
                    (cast_type == RTN_MULTICAST) ||
                    (cast_type == RTN_ANYCAST))
                        return cast_type;
-               else
-                       return RTN_UNSPEC;
+               return RTN_UNSPEC;
        }
        rcu_read_unlock();
 
-       /* try something else */
+       /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
        if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
-               return (skb_network_header(skb)[24] == 0xff) ?
-                               RTN_MULTICAST : 0;
+               return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
+                               RTN_MULTICAST : RTN_UNSPEC;
        else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
-               return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
-                               RTN_MULTICAST : 0;
-       /* ... */
-       if (!memcmp(skb->data, skb->dev->broadcast, 6))
+               return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
+                               RTN_MULTICAST : RTN_UNSPEC;
+
+       /* ... and MAC address */
+       if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
                return RTN_BROADCAST;
-       else {
-               u16 hdr_mac;
-
-               hdr_mac = *((u16 *)skb->data);
-               /* tr multicast? */
-               switch (card->info.link_type) {
-               case QETH_LINK_TYPE_HSTR:
-               case QETH_LINK_TYPE_LANE_TR:
-                       if ((hdr_mac == QETH_TR_MAC_NC) ||
-                           (hdr_mac == QETH_TR_MAC_C))
-                               return RTN_MULTICAST;
-                       break;
-               /* eth or so multicast? */
-               default:
-               if ((hdr_mac == QETH_ETH_MAC_V4) ||
-                           (hdr_mac == QETH_ETH_MAC_V6))
-                               return RTN_MULTICAST;
-               }
-       }
-       return cast_type;
+       if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+               return RTN_MULTICAST;
+
+       /* default to unicast */
+       return RTN_UNSPEC;
 }
 
 static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
@@ -2494,17 +2310,27 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
        daddr[0] = 0xfe;
        daddr[1] = 0x80;
        memcpy(&daddr[8], iucv_hdr->destUserID, 8);
-       memcpy(hdr->hdr.l3.dest_addr, daddr, 16);
+       memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
 }
 
-static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
-               struct sk_buff *skb, int ipv, int cast_type)
+static u8 qeth_l3_cast_type_to_flag(int cast_type)
 {
-       struct dst_entry *dst;
+       if (cast_type == RTN_MULTICAST)
+               return QETH_CAST_MULTICAST;
+       if (cast_type == RTN_ANYCAST)
+               return QETH_CAST_ANYCAST;
+       if (cast_type == RTN_BROADCAST)
+               return QETH_CAST_BROADCAST;
+       return QETH_CAST_UNICAST;
+}
 
+static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+                               struct sk_buff *skb, int ipv, int cast_type,
+                               unsigned int data_len)
+{
        memset(hdr, 0, sizeof(struct qeth_hdr));
        hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
-       hdr->hdr.l3.ext_flags = 0;
+       hdr->hdr.l3.length = data_len;
 
        /*
         * before we're going to overwrite this location with next hop ip.
@@ -2518,44 +2344,40 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
        }
 
-       hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
+       /* OSA only: */
+       if (!ipv) {
+               hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
+               if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
+                                           skb->dev->broadcast))
+                       hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
+               else
+                       hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
+                               QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
+               return;
+       }
 
+       hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
        rcu_read_lock();
-       dst = skb_dst(skb);
        if (ipv == 4) {
-               struct rtable *rt = (struct rtable *) dst;
-               __be32 *pkey = &ip_hdr(skb)->daddr;
-
-               if (rt && rt->rt_gateway)
-                       pkey = &rt->rt_gateway;
+               struct rtable *rt = skb_rtable(skb);
 
-               /* IPv4 */
-               hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
-               memset(hdr->hdr.l3.dest_addr, 0, 12);
-               *((__be32 *) (&hdr->hdr.l3.dest_addr[12])) = *pkey;
-       } else if (ipv == 6) {
-               struct rt6_info *rt = (struct rt6_info *) dst;
-               struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
+               *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
+                               rt_nexthop(rt, ip_hdr(skb)->daddr) :
+                               ip_hdr(skb)->daddr;
+       } else {
+               /* IPv6 */
+               const struct rt6_info *rt = skb_rt6_info(skb);
+               const struct in6_addr *next_hop;
 
                if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
-                       pkey = &rt->rt6i_gateway;
+                       next_hop = &rt->rt6i_gateway;
+               else
+                       next_hop = &ipv6_hdr(skb)->daddr;
+               memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
 
-               /* IPv6 */
-               hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
-               if (card->info.type == QETH_CARD_TYPE_IQD)
-                       hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
-               memcpy(hdr->hdr.l3.dest_addr, pkey, 16);
-       } else {
-               if (!memcmp(skb->data + sizeof(struct qeth_hdr),
-                           skb->dev->broadcast, 6)) {
-                       /* broadcast? */
-                       hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
-                                               QETH_HDR_PASSTHRU;
-               } else {
-                       hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
-                               QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
-                               QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
-               }
+               hdr->hdr.l3.flags |= QETH_HDR_IPV6;
+               if (card->info.type != QETH_CARD_TYPE_IQD)
+                       hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
        }
        rcu_read_unlock();
 }
@@ -2587,7 +2409,6 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 
        /*fix header to TSO values ...*/
        hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
-       hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
        /*set values which are fix for the first approach ...*/
        hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
        hdr->ext.imb_hdr_no  = 1;
@@ -2655,7 +2476,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
        struct qeth_card *card = dev->ml_priv;
        struct sk_buff *new_skb = NULL;
        int ipv = qeth_get_ip_version(skb);
-       int cast_type = qeth_l3_get_cast_type(card, skb);
+       int cast_type = qeth_l3_get_cast_type(skb);
        struct qeth_qdio_out_q *queue =
                card->qdio.out_qs[card->qdio.do_prio_queueing
                        || (cast_type && card->info.is_multicast_different) ?
@@ -2748,21 +2569,23 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
        if (use_tso) {
                hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
                memset(hdr, 0, sizeof(struct qeth_hdr_tso));
-               qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
+               qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+                                   new_skb->len - sizeof(struct qeth_hdr_tso));
                qeth_tso_fill_header(card, hdr, new_skb);
                hdr_elements++;
        } else {
                if (data_offset < 0) {
                        hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
-                       qeth_l3_fill_header(card, hdr, new_skb, ipv,
-                                               cast_type);
+                       qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+                                           new_skb->len -
+                                           sizeof(struct qeth_hdr));
                } else {
                        if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV)
                                qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
                        else {
                                qeth_l3_fill_header(card, hdr, new_skb, ipv,
-                                                       cast_type);
-                               hdr->hdr.l3.length = new_skb->len - data_offset;
+                                                   cast_type,
+                                                   new_skb->len - data_offset);
                        }
                }
 
@@ -2930,7 +2753,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
        .ndo_get_stats          = qeth_get_stats,
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_rx_mode        = qeth_l3_set_multicast_list,
+       .ndo_set_rx_mode        = qeth_l3_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
        .ndo_change_mtu         = qeth_change_mtu,
        .ndo_fix_features       = qeth_fix_features,
@@ -2947,7 +2770,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
        .ndo_start_xmit         = qeth_l3_hard_start_xmit,
        .ndo_features_check     = qeth_features_check,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_rx_mode        = qeth_l3_set_multicast_list,
+       .ndo_set_rx_mode        = qeth_l3_set_rx_mode,
        .ndo_do_ioctl           = qeth_do_ioctl,
        .ndo_change_mtu         = qeth_change_mtu,
        .ndo_fix_features       = qeth_fix_features,
@@ -3145,7 +2968,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                        __qeth_l3_open(card->dev);
                else
                        dev_open(card->dev);
-               qeth_l3_set_multicast_list(card->dev);
+               qeth_l3_set_rx_mode(card->dev);
                qeth_recover_features(card->dev);
                rtnl_unlock();
        }
@@ -3371,10 +3194,6 @@ static struct notifier_block qeth_l3_ip_notifier = {
        NULL,
 };
 
-#ifdef CONFIG_QETH_IPV6
-/**
- * IPv6 event handler
- */
 static int qeth_l3_ip6_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
 {
@@ -3419,7 +3238,6 @@ static struct notifier_block qeth_l3_ip6_notifier = {
        qeth_l3_ip6_event,
        NULL,
 };
-#endif
 
 static int qeth_l3_register_notifiers(void)
 {
@@ -3429,35 +3247,25 @@ static int qeth_l3_register_notifiers(void)
        rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
        if (rc)
                return rc;
-#ifdef CONFIG_QETH_IPV6
        rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
        if (rc) {
                unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
                return rc;
        }
-#else
-       pr_warn("There is no IPv6 support for the layer 3 discipline\n");
-#endif
        return 0;
 }
 
 static void qeth_l3_unregister_notifiers(void)
 {
-
        QETH_DBF_TEXT(SETUP, 5, "unregnot");
        WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
-#ifdef CONFIG_QETH_IPV6
        WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
-#endif /* QETH_IPV6 */
 }
 
 static int __init qeth_l3_init(void)
 {
-       int rc = 0;
-
        pr_info("register layer 3 discipline\n");
-       rc = qeth_l3_register_notifiers();
-       return rc;
+       return qeth_l3_register_notifiers();
 }
 
 static void __exit qeth_l3_exit(void)
index 6ea2b52..a645cfe 100644 (file)
 #include <linux/slab.h>
 #include <asm/ebcdic.h>
 #include <linux/hashtable.h>
+#include <linux/inet.h>
 #include "qeth_l3.h"
 
 #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
 struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
 
+static int qeth_l3_string_to_ipaddr(const char *buf,
+                                   enum qeth_prot_versions proto, u8 *addr)
+{
+       const char *end;
+
+       if ((proto == QETH_PROT_IPV4 && !in4_pton(buf, -1, addr, -1, &end)) ||
+           (proto == QETH_PROT_IPV6 && !in6_pton(buf, -1, addr, -1, &end)))
+               return -EINVAL;
+       return 0;
+}
+
 static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
                        struct qeth_routing_info *route, char *buf)
 {
@@ -262,7 +274,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
        struct qeth_card *card = dev_get_drvdata(dev);
        struct qeth_ipaddr *addr;
        char *tmp;
-       int i;
+       int rc, i;
 
        if (!card)
                return -EINVAL;
@@ -331,11 +343,11 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
                return -ENOMEM;
 
        spin_lock_bh(&card->ip_lock);
-       qeth_l3_add_ip(card, addr);
+       rc = qeth_l3_add_ip(card, addr);
        spin_unlock_bh(&card->ip_lock);
        kfree(addr);
 
-       return count;
+       return rc ? rc : count;
 }
 
 static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
@@ -573,7 +585,7 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
        mutex_lock(&card->conf_mutex);
        rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
        if (!rc)
-               qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
+               rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
 }
@@ -693,22 +705,25 @@ static const struct attribute_group qeth_device_ipato_group = {
        .attrs = qeth_ipato_device_attrs,
 };
 
-static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
-                       enum qeth_prot_versions proto)
+static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
+                                      enum qeth_prot_versions proto,
+                                      enum qeth_ip_types type)
 {
+       struct qeth_card *card = dev_get_drvdata(dev);
        struct qeth_ipaddr *ipaddr;
        char addr_str[40];
        int str_len = 0;
        int entry_len; /* length of 1 entry string, differs between v4 and v6 */
        int i;
 
+       if (!card)
+               return -EINVAL;
+
        entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
        entry_len += 2; /* \n + terminator */
        spin_lock_bh(&card->ip_lock);
        hash_for_each(card->ip_htable, i, ipaddr, hnode) {
-               if (ipaddr->proto != proto)
-                       continue;
-               if (ipaddr->type != QETH_IP_TYPE_VIPA)
+               if (ipaddr->proto != proto || ipaddr->type != type)
                        continue;
                /* String must not be longer than PAGE_SIZE. So we check if
                 * string length gets near PAGE_SIZE. Then we can savely display
@@ -727,14 +742,11 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
 }
 
 static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
-       struct qeth_card *card = dev_get_drvdata(dev);
-
-       if (!card)
-               return -EINVAL;
-
-       return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
+       return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+                                      QETH_IP_TYPE_VIPA);
 }
 
 static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
@@ -784,7 +796,7 @@ static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
        mutex_lock(&card->conf_mutex);
        rc = qeth_l3_parse_vipae(buf, proto, addr);
        if (!rc)
-               qeth_l3_del_vipa(card, proto, addr);
+               rc = qeth_l3_del_vipa(card, proto, addr);
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
 }
@@ -804,14 +816,11 @@ static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
                        qeth_l3_dev_vipa_del4_store);
 
 static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
-       struct qeth_card *card = dev_get_drvdata(dev);
-
-       if (!card)
-               return -EINVAL;
-
-       return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
+       return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+                                      QETH_IP_TYPE_VIPA);
 }
 
 static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
@@ -856,48 +865,12 @@ static const struct attribute_group qeth_device_vipa_group = {
        .attrs = qeth_vipa_device_attrs,
 };
 
-static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
-                      enum qeth_prot_versions proto)
-{
-       struct qeth_ipaddr *ipaddr;
-       char addr_str[40];
-       int str_len = 0;
-       int entry_len; /* length of 1 entry string, differs between v4 and v6 */
-       int i;
-
-       entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
-       entry_len += 2; /* \n + terminator */
-       spin_lock_bh(&card->ip_lock);
-       hash_for_each(card->ip_htable, i, ipaddr, hnode) {
-               if (ipaddr->proto != proto)
-                       continue;
-               if (ipaddr->type != QETH_IP_TYPE_RXIP)
-                       continue;
-               /* String must not be longer than PAGE_SIZE. So we check if
-                * string length gets near PAGE_SIZE. Then we can savely display
-                * the next IPv6 address (worst case, compared to IPv4) */
-               if ((PAGE_SIZE - str_len) <= entry_len)
-                       break;
-               qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
-                       addr_str);
-               str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
-                                   addr_str);
-       }
-       spin_unlock_bh(&card->ip_lock);
-       str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
-
-       return str_len;
-}
-
 static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
-       struct qeth_card *card = dev_get_drvdata(dev);
-
-       if (!card)
-               return -EINVAL;
-
-       return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
+       return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+                                      QETH_IP_TYPE_RXIP);
 }
 
 static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
@@ -964,7 +937,7 @@ static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
        mutex_lock(&card->conf_mutex);
        rc = qeth_l3_parse_rxipe(buf, proto, addr);
        if (!rc)
-               qeth_l3_del_rxip(card, proto, addr);
+               rc = qeth_l3_del_rxip(card, proto, addr);
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
 }
@@ -984,14 +957,11 @@ static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
                        qeth_l3_dev_rxip_del4_store);
 
 static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
-               struct device_attribute *attr, char *buf)
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
-       struct qeth_card *card = dev_get_drvdata(dev);
-
-       if (!card)
-               return -EINVAL;
-
-       return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
+       return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+                                      QETH_IP_TYPE_RXIP);
 }
 
 static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
index 6e3d819..d522654 100644 (file)
@@ -1725,6 +1725,7 @@ struct aac_dev
 #define FIB_CONTEXT_FLAG_NATIVE_HBA            (0x00000010)
 #define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF        (0x00000020)
 #define FIB_CONTEXT_FLAG_SCSI_CMD      (0x00000040)
+#define FIB_CONTEXT_FLAG_EH_RESET      (0x00000080)
 
 /*
  *     Define the command values
index bdf127a..d55332d 100644 (file)
@@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
                        info = &aac->hba_map[bus][cid];
                        if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
                            info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
-                               fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+                               fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
                                cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
                        }
                }
index a4f28b7..e188771 100644 (file)
@@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
                return req;
 
        for_each_bio(bio) {
-               ret = blk_rq_append_bio(req, bio);
+               struct bio *bounce_bio = bio;
+
+               ret = blk_rq_append_bio(req, &bounce_bio);
                if (ret)
                        return ERR_PTR(ret);
        }
index 7d91e53..a980ef7 100644 (file)
@@ -25,15 +25,17 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
                                u32 task_retry_id,
                                u8 fcp_cmd_payload[32])
 {
-       struct fcoe_task_context *ctx = task_params->context;
+       struct e4_fcoe_task_context *ctx = task_params->context;
+       const u8 val_byte = ctx->ystorm_ag_context.byte0;
+       struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
        struct ystorm_fcoe_task_st_ctx *y_st_ctx;
        struct tstorm_fcoe_task_st_ctx *t_st_ctx;
-       struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
        struct mstorm_fcoe_task_st_ctx *m_st_ctx;
        u32 io_size, val;
        bool slow_sgl;
 
        memset(ctx, 0, sizeof(*(ctx)));
+       ctx->ystorm_ag_context.byte0 = val_byte;
        slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
                                    sgl_task_params->small_mid_sge);
        io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
@@ -43,20 +45,20 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
        y_st_ctx = &ctx->ystorm_st_context;
        y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
        y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
-       y_st_ctx->task_type = task_params->task_type;
+       y_st_ctx->task_type = (u8)task_params->task_type;
        memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
               fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
 
        /* Tstorm ctx */
        t_st_ctx = &ctx->tstorm_st_context;
-       t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
-                                       FCOE_TASK_DEV_TYPE_TAPE :
-                                       FCOE_TASK_DEV_TYPE_DISK);
+       t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ?
+                                           FCOE_TASK_DEV_TYPE_TAPE :
+                                           FCOE_TASK_DEV_TYPE_DISK);
        t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
        val = cpu_to_le32(task_params->cq_rss_number);
        t_st_ctx->read_only.glbl_q_num = val;
        t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
-       t_st_ctx->read_only.task_type = task_params->task_type;
+       t_st_ctx->read_only.task_type = (u8)task_params->task_type;
        SET_FIELD(t_st_ctx->read_write.flags,
                  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
        t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -88,6 +90,8 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
                SET_FIELD(m_st_ctx->flags,
                          MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
                          (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+               m_st_ctx->sgl_params.sgl_num_sges =
+                       cpu_to_le16(sgl_task_params->num_sges);
        } else {
                /* Tstorm ctx */
                SET_FIELD(t_st_ctx->read_write.flags,
@@ -101,7 +105,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
                                      sgl_task_params);
        }
 
+       /* Init Sqe */
        init_common_sqe(task_params, SEND_FCOE_CMD);
+
        return 0;
 }
 
@@ -112,14 +118,16 @@ int init_initiator_midpath_unsolicited_fcoe_task(
        struct scsi_sgl_task_params *rx_sgl_task_params,
        u8 fw_to_place_fc_header)
 {
-       struct fcoe_task_context *ctx = task_params->context;
+       struct e4_fcoe_task_context *ctx = task_params->context;
+       const u8 val_byte = ctx->ystorm_ag_context.byte0;
+       struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
        struct ystorm_fcoe_task_st_ctx *y_st_ctx;
        struct tstorm_fcoe_task_st_ctx *t_st_ctx;
-       struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
        struct mstorm_fcoe_task_st_ctx *m_st_ctx;
        u32 val;
 
        memset(ctx, 0, sizeof(*(ctx)));
+       ctx->ystorm_ag_context.byte0 = val_byte;
 
        /* Init Ystorm */
        y_st_ctx = &ctx->ystorm_st_context;
@@ -129,7 +137,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
        SET_FIELD(y_st_ctx->sgl_mode,
                  YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
        y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
-       y_st_ctx->task_type = task_params->task_type;
+       y_st_ctx->task_type = (u8)task_params->task_type;
        memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
               mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
 
@@ -148,7 +156,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
        t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
        val = cpu_to_le32(task_params->cq_rss_number);
        t_st_ctx->read_only.glbl_q_num = val;
-       t_st_ctx->read_only.task_type = task_params->task_type;
+       t_st_ctx->read_only.task_type = (u8)task_params->task_type;
        SET_FIELD(t_st_ctx->read_write.flags,
                  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
        t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -182,9 +190,10 @@ int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
 }
 
 int init_initiator_sequence_recovery_fcoe_task(
-       struct fcoe_task_params *task_params, u32 off)
+       struct fcoe_task_params *task_params, u32 desired_offset)
 {
        init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
-       task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+       task_params->sqe->additional_info_union.seq_rec_updated_offset =
+                                                               desired_offset;
        return 0;
 }
index f9c50fa..b5c236e 100644 (file)
@@ -13,7 +13,7 @@
 
 struct fcoe_task_params {
        /* Output parameter [set/filled by the HSI function] */
-       struct fcoe_task_context *context;
+       struct e4_fcoe_task_context *context;
 
        /* Output parameter [set/filled by the HSI function] */
        struct fcoe_wqe *sqe;
index 9bf7b22..c105a2e 100644 (file)
@@ -129,7 +129,7 @@ struct qedf_ioreq {
        struct delayed_work timeout_work;
        struct completion tm_done;
        struct completion abts_done;
-       struct fcoe_task_context *task;
+       struct e4_fcoe_task_context *task;
        struct fcoe_task_params *task_params;
        struct scsi_sgl_task_params *sgl_task_params;
        int idx;
@@ -465,7 +465,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
        unsigned int timer_msec);
 extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
 extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
-       struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+       struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
 extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
 extern void qedf_ring_doorbell(struct qedf_rport *fcport);
 extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
index 59c18ca..aa22b11 100644 (file)
@@ -19,7 +19,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
        struct qedf_ioreq *els_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *fc_hdr;
-       struct fcoe_task_context *task;
+       struct e4_fcoe_task_context *task;
        int rc = 0;
        uint32_t did, sid;
        uint16_t xid;
index 7faef80..503c1ae 100644 (file)
@@ -225,19 +225,6 @@ enum fcoe_cqe_type {
        MAX_FCOE_CQE_TYPE
 };
 
-
-/*
- * FCoE device type
- */
-enum fcoe_device_type {
-       FCOE_TASK_DEV_TYPE_DISK,
-       FCOE_TASK_DEV_TYPE_TAPE,
-       MAX_FCOE_DEVICE_TYPE
-};
-
-
-
-
 /*
  * FCoE fast path error codes
  */
@@ -332,31 +319,6 @@ enum fcoe_sp_error_code {
        MAX_FCOE_SP_ERROR_CODE
 };
 
-
-/*
- * FCoE SQE request type
- */
-enum fcoe_sqe_request_type {
-       SEND_FCOE_CMD,
-       SEND_FCOE_MIDPATH,
-       SEND_FCOE_ABTS_REQUEST,
-       FCOE_EXCHANGE_CLEANUP,
-       FCOE_SEQUENCE_RECOVERY,
-       SEND_FCOE_XFER_RDY,
-       SEND_FCOE_RSP,
-       SEND_FCOE_RSP_WITH_SENSE_DATA,
-       SEND_FCOE_TARGET_DATA,
-       SEND_FCOE_INITIATOR_DATA,
-       /*
-        * Xfer Continuation (==1) ready to be sent. Previous XFERs data
-        * received successfully.
-        */
-       SEND_FCOE_XFER_CONTINUATION_RDY,
-       SEND_FCOE_TARGET_ABTS_RSP,
-       MAX_FCOE_SQE_REQUEST_TYPE
-};
-
-
 /*
  * FCoE task TX state
  */
@@ -389,34 +351,4 @@ enum fcoe_task_tx_state {
        MAX_FCOE_TASK_TX_STATE
 };
 
-
-/*
- * FCoE task type
- */
-enum fcoe_task_type {
-       FCOE_TASK_TYPE_WRITE_INITIATOR,
-       FCOE_TASK_TYPE_READ_INITIATOR,
-       FCOE_TASK_TYPE_MIDPATH,
-       FCOE_TASK_TYPE_UNSOLICITED,
-       FCOE_TASK_TYPE_ABTS,
-       FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
-       FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
-       FCOE_TASK_TYPE_WRITE_TARGET,
-       FCOE_TASK_TYPE_READ_TARGET,
-       FCOE_TASK_TYPE_RSP,
-       FCOE_TASK_TYPE_RSP_SENSE_DATA,
-       FCOE_TASK_TYPE_ABTS_TARGET,
-       FCOE_TASK_TYPE_ENUM_SIZE,
-       MAX_FCOE_TASK_TYPE
-};
-
-struct scsi_glbl_queue_entry {
-       /* Start physical address for the RQ (receive queue) PBL. */
-       struct regpair rq_pbl_addr;
-       /* Start physical address for the CQ (completion queue) PBL. */
-       struct regpair cq_pbl_addr;
-       /* Start physical address for the CMDQ (command queue) PBL. */
-       struct regpair cmdq_pbl_addr;
-};
-
 #endif /* __QEDF_HSI__ */
index ded3860..b15e695 100644 (file)
@@ -579,7 +579,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
 }
 
 static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
-       struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+       struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
        struct fcoe_wqe *sqe)
 {
        enum fcoe_task_type task_type;
@@ -597,7 +597,7 @@ static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
 
        /* Note init_initiator_rw_fcoe_task memsets the task context */
        io_req->task = task_ctx;
-       memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+       memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
        memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
        memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
 
@@ -673,7 +673,7 @@ static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
 }
 
 void qedf_init_mp_task(struct qedf_ioreq *io_req,
-       struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
+       struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
 {
        struct qedf_mp_req *mp_req = &(io_req->mp_req);
        struct qedf_rport *fcport = io_req->fcport;
@@ -691,7 +691,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
 
        memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
        memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
-       memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+       memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
        memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
 
        /* Setup the task from io_req for easy reference */
@@ -844,7 +844,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
        struct Scsi_Host *host = sc_cmd->device->host;
        struct fc_lport *lport = shost_priv(host);
        struct qedf_ctx *qedf = lport_priv(lport);
-       struct fcoe_task_context *task_ctx;
+       struct e4_fcoe_task_context *task_ctx;
        u16 xid;
        enum fcoe_task_type req_type = 0;
        struct fcoe_wqe *sqe;
@@ -1065,7 +1065,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req)
 {
        u16 xid, rval;
-       struct fcoe_task_context *task_ctx;
+       struct e4_fcoe_task_context *task_ctx;
        struct scsi_cmnd *sc_cmd;
        struct fcoe_cqe_rsp_info *fcp_rsp;
        struct qedf_rport *fcport;
@@ -1722,7 +1722,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
        struct qedf_rport *fcport;
        struct qedf_ctx *qedf;
        uint16_t xid;
-       struct fcoe_task_context *task;
+       struct e4_fcoe_task_context *task;
        int tmo = 0;
        int rc = SUCCESS;
        unsigned long flags;
@@ -1835,7 +1835,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
        uint8_t tm_flags)
 {
        struct qedf_ioreq *io_req;
-       struct fcoe_task_context *task;
+       struct e4_fcoe_task_context *task;
        struct qedf_ctx *qedf = fcport->qedf;
        struct fc_lport *lport = qedf->lport;
        int rc = 0;
@@ -2005,17 +2005,18 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
        struct qedf_io_work *io_work;
        u32 bdq_idx;
        void *bdq_addr;
+       struct scsi_bd *p_bd_info;
 
+       p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
-           "address.hi=%x address.lo=%x opaque_data.hi=%x "
-           "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
-           le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
-           le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
-           le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
-           le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
-           qedf->bdq_prod_idx, pktlen);
-
-       bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
+                 "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
+                 le32_to_cpu(p_bd_info->address.hi),
+                 le32_to_cpu(p_bd_info->address.lo),
+                 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
+                 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
+                 qedf->bdq_prod_idx, pktlen);
+
+       bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
        if (bdq_idx >= QEDF_BDQ_SIZE) {
                QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
                    bdq_idx);
index 7c00645..40800dd 100644 (file)
@@ -1860,7 +1860,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
        struct qedf_ctx *qedf = fp->qedf;
        struct global_queue *que;
        struct qed_sb_info *sb_info = fp->sb_info;
-       struct status_block *sb = sb_info->sb_virt;
+       struct status_block_e4 *sb = sb_info->sb_virt;
        u16 prod_idx;
 
        /* Get the pointer to the global CQ this completion is on */
@@ -1887,7 +1887,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
 {
        struct qedf_ctx *qedf = fp->qedf;
        struct qed_sb_info *sb_info = fp->sb_info;
-       struct status_block *sb = sb_info->sb_virt;
+       struct status_block_e4 *sb = sb_info->sb_virt;
        struct global_queue *que;
        u16 prod_idx;
        struct fcoe_cqe *cqe;
@@ -2352,12 +2352,12 @@ void qedf_fp_io_handler(struct work_struct *work)
 static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
        struct qed_sb_info *sb_info, u16 sb_id)
 {
-       struct status_block *sb_virt;
+       struct status_block_e4 *sb_virt;
        dma_addr_t sb_phys;
        int ret;
 
        sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
-           sizeof(struct status_block), &sb_phys, GFP_KERNEL);
+           sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
 
        if (!sb_virt) {
                QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
@@ -2623,9 +2623,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
        for (i = 0; i < QEDF_BDQ_SIZE; i++) {
                pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
                pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
-               pbl->opaque.hi = 0;
+               pbl->opaque.fcoe_opaque.hi = 0;
                /* Opaque lo data is an index into the BDQ array */
-               pbl->opaque.lo = cpu_to_le32(i);
+               pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
                pbl++;
        }
 
index 397b3b8..c247805 100644 (file)
@@ -7,9 +7,9 @@
  *  this source tree.
  */
 
-#define QEDF_VERSION           "8.20.5.0"
+#define QEDF_VERSION           "8.33.0.20"
 #define QEDF_DRIVER_MAJOR_VER          8
-#define QEDF_DRIVER_MINOR_VER          20
-#define QEDF_DRIVER_REV_VER            5
-#define QEDF_DRIVER_ENG_VER            0
+#define QEDF_DRIVER_MINOR_VER          33
+#define QEDF_DRIVER_REV_VER            0
+#define QEDF_DRIVER_ENG_VER            20
 
index 39d7781..fd8a1ee 100644 (file)
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
 {
        struct qedi_fastpath *fp = NULL;
        struct qed_sb_info *sb_info = NULL;
-       struct status_block *sb = NULL;
+       struct status_block_e4 *sb = NULL;
        struct global_queue *que = NULL;
        int id;
        u16 prod_idx;
@@ -168,7 +168,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
                sb_info = fp->sb_info;
                sb = sb_info->sb_virt;
                prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
-                           STATUS_BLOCK_PROD_INDEX_MASK);
+                           STATUS_BLOCK_E4_PROD_INDEX_MASK);
                seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
                que = qedi->global_queues[fp->sb_id];
                seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
index bd302d3..092e8f9 100644 (file)
@@ -87,7 +87,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
 {
        struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
-       struct iscsi_task_context *task_ctx;
+       struct e4_iscsi_task_context *task_ctx;
        struct iscsi_text_rsp *resp_hdr_ptr;
        struct iscsi_text_response_hdr *cqe_text_response;
        struct qedi_cmd *cmd;
@@ -260,7 +260,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
 {
        struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
-       struct iscsi_task_context *task_ctx;
+       struct e4_iscsi_task_context *task_ctx;
        struct iscsi_login_rsp *resp_hdr_ptr;
        struct iscsi_login_response_hdr *cqe_login_response;
        struct qedi_cmd *cmd;
@@ -326,7 +326,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
                  (qedi->bdq_prod_idx % qedi->rq_num_entries));
 
        /* Obtain buffer address from rqe_opaque */
-       idx = cqe->rqe_opaque.lo;
+       idx = cqe->rqe_opaque;
        if (idx > (QEDI_BDQ_NUM - 1)) {
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                          "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -335,8 +335,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
        }
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
-                 "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
-                 cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+                 "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                  "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
@@ -363,7 +362,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
        struct scsi_bd *pbl;
 
        /* Obtain buffer address from rqe_opaque */
-       idx = cqe->rqe_opaque.lo;
+       idx = cqe->rqe_opaque;
        if (idx > (QEDI_BDQ_NUM - 1)) {
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                          "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -378,8 +377,10 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
                  pbl, pbl->address.hi, pbl->address.lo, idx);
-       pbl->opaque.hi = 0;
-       pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+       pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+       pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+       pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+       pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
 
        /* Increment producer to let f/w know we've handled the frame */
        qedi->bdq_prod_idx += count;
@@ -1017,7 +1018,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
        struct scsi_sgl_task_params tx_sgl_task_params;
        struct scsi_sgl_task_params rx_sgl_task_params;
        struct iscsi_task_params task_params;
-       struct iscsi_task_context *fw_task_ctx;
+       struct e4_iscsi_task_context *fw_task_ctx;
        struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_login_req *login_hdr;
        struct scsi_sge *resp_sge = NULL;
@@ -1037,8 +1038,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
                return -ENOMEM;
 
        fw_task_ctx =
-            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+            (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+                                                              tid);
+       memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
        qedi_cmd->task_id = tid;
 
@@ -1119,7 +1121,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
        struct scsi_sgl_task_params tx_sgl_task_params;
        struct scsi_sgl_task_params rx_sgl_task_params;
        struct iscsi_task_params task_params;
-       struct iscsi_task_context *fw_task_ctx;
+       struct e4_iscsi_task_context *fw_task_ctx;
        struct iscsi_logout *logout_hdr = NULL;
        struct qedi_ctx *qedi = qedi_conn->qedi;
        struct qedi_cmd *qedi_cmd;
@@ -1137,8 +1139,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
                return -ENOMEM;
 
        fw_task_ctx =
-            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+            (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+                                                              tid);
+       memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
        qedi_cmd->task_id = tid;
 
@@ -1467,7 +1470,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
        struct iscsi_tmf_request_hdr tmf_pdu_header;
        struct iscsi_task_params task_params;
        struct qedi_ctx *qedi = qedi_conn->qedi;
-       struct iscsi_task_context *fw_task_ctx;
+       struct e4_iscsi_task_context *fw_task_ctx;
        struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
        struct iscsi_task *ctask;
        struct iscsi_tm *tmf_hdr;
@@ -1490,8 +1493,9 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
                return -ENOMEM;
 
        fw_task_ctx =
-            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+            (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+                                                              tid);
+       memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
        qedi_cmd->task_id = tid;
 
@@ -1605,7 +1609,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
        struct scsi_sgl_task_params tx_sgl_task_params;
        struct scsi_sgl_task_params rx_sgl_task_params;
        struct iscsi_task_params task_params;
-       struct iscsi_task_context *fw_task_ctx;
+       struct e4_iscsi_task_context *fw_task_ctx;
        struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_text *text_hdr;
        struct scsi_sge *req_sge = NULL;
@@ -1627,8 +1631,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
                return -ENOMEM;
 
        fw_task_ctx =
-            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+            (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+                                                              tid);
+       memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
        qedi_cmd->task_id = tid;
 
@@ -1705,7 +1710,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
        struct scsi_sgl_task_params rx_sgl_task_params;
        struct iscsi_task_params task_params;
        struct qedi_ctx *qedi = qedi_conn->qedi;
-       struct iscsi_task_context *fw_task_ctx;
+       struct e4_iscsi_task_context *fw_task_ctx;
        struct iscsi_nopout *nopout_hdr;
        struct scsi_sge *resp_sge = NULL;
        struct qedi_cmd *qedi_cmd;
@@ -1725,8 +1730,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
                return -ENOMEM;
 
        fw_task_ctx =
-            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+            (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+                                                              tid);
+       memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
        qedi_cmd->task_id = tid;
 
@@ -2046,7 +2052,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
        struct iscsi_task_params task_params;
        struct iscsi_conn_params conn_params;
        struct scsi_initiator_cmd_params cmd_params;
-       struct iscsi_task_context *fw_task_ctx;
+       struct e4_iscsi_task_context *fw_task_ctx;
        struct iscsi_cls_conn *cls_conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
        enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
@@ -2069,8 +2075,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
                return -ENOMEM;
 
        fw_task_ctx =
-            (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-       memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+            (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+                                                              tid);
+       memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
        cmd->task_id = tid;
 
index 7df32a6..a269da1 100644 (file)
@@ -203,12 +203,15 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
                                    struct data_hdr *pdu_header,
                                    enum iscsi_task_type task_type)
 {
-       struct iscsi_task_context *context;
-       u16 index;
+       struct e4_iscsi_task_context *context;
        u32 val;
+       u16 index;
+       u8 val_byte;
 
        context = task_params->context;
+       val_byte = context->mstorm_ag_context.cdu_validation;
        memset(context, 0, sizeof(*context));
+       context->mstorm_ag_context.cdu_validation = val_byte;
 
        for (index = 0; index <
             ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
@@ -222,7 +225,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
                                            cpu_to_le16(task_params->conn_icid);
 
        SET_FIELD(context->ustorm_ag_context.flags1,
-                 USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+                 E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
 
        context->ustorm_st_context.task_type = task_type;
        context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
@@ -252,10 +255,9 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
 
 static
 void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
-                              struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
-                              u32 remaining_recv_len,
-                              u32 expected_data_transfer_len,
-                              u8 num_sges, bool tx_dif_conn_err_en)
+                       struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+                       u32 remaining_recv_len, u32 expected_data_transfer_len,
+                       u8 num_sges, bool tx_dif_conn_err_en)
 {
        u32 val;
 
@@ -265,12 +267,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
        ustorm_st_cxt->exp_data_transfer_len = val;
        SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
        SET_FIELD(ustorm_ag_cxt->flags2,
-                 USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+                 E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
                  tx_dif_conn_err_en ? 1 : 0);
 }
 
 static
-void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
                                        struct iscsi_conn_params  *conn_params,
                                        enum iscsi_task_type task_type,
                                        u32 task_size,
@@ -342,56 +344,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
                             cpu_to_le16(dif_task_params->application_tag_mask);
                SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
                          dif_task_params->crc_seed ? 1 : 0);
-               SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+               SET_FIELD(rdif_context->flags0,
+                         RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
                          dif_task_params->host_guard_type);
                SET_FIELD(rdif_context->flags0,
-                         RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+                         RDIF_TASK_CONTEXT_PROTECTION_TYPE,
                          dif_task_params->protection_type);
                SET_FIELD(rdif_context->flags0,
-                         RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+                         RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
                SET_FIELD(rdif_context->flags0,
-                         RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+                         RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
                          dif_task_params->keep_ref_tag_const ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
-                         RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+                         RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
                          (dif_task_params->validate_app_tag &&
                          dif_task_params->dif_on_network) ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
-                         RDIF_TASK_CONTEXT_VALIDATEGUARD,
+                         RDIF_TASK_CONTEXT_VALIDATE_GUARD,
                          (dif_task_params->validate_guard &&
                          dif_task_params->dif_on_network) ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
-                         RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+                         RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
                          (dif_task_params->validate_ref_tag &&
                          dif_task_params->dif_on_network) ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
-                         RDIF_TASK_CONTEXT_HOSTINTERFACE,
+                         RDIF_TASK_CONTEXT_HOST_INTERFACE,
                          dif_task_params->dif_on_host ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
-                         RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+                         RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
                          dif_task_params->dif_on_network ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
-                         RDIF_TASK_CONTEXT_FORWARDGUARD,
+                         RDIF_TASK_CONTEXT_FORWARD_GUARD,
                          dif_task_params->forward_guard ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
-                         RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+                         RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
                          dif_task_params->forward_app_tag ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
-                         RDIF_TASK_CONTEXT_FORWARDREFTAG,
+                         RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
                          dif_task_params->forward_ref_tag ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
-                         RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+                         RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
                          dif_task_params->forward_app_tag_with_mask ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
-                         RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+                         RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
                          dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
                SET_FIELD(rdif_context->flags1,
-                         RDIF_TASK_CONTEXT_INTERVALSIZE,
+                         RDIF_TASK_CONTEXT_INTERVAL_SIZE,
                          dif_task_params->dif_block_size_log - 9);
                SET_FIELD(rdif_context->state,
-                         RDIF_TASK_CONTEXT_REFTAGMASK,
+                         RDIF_TASK_CONTEXT_REF_TAG_MASK,
                          dif_task_params->ref_tag_mask);
-               SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+               SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
                          dif_task_params->ignore_app_tag);
        }
 
@@ -399,7 +402,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
            task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
                tdif_context->app_tag_value =
                                  cpu_to_le16(dif_task_params->application_tag);
-               tdif_context->partial_crc_valueB =
+               tdif_context->partial_crc_value_b =
                       cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
                tdif_context->partial_crc_value_a =
                       cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
@@ -407,64 +410,68 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
                          dif_task_params->crc_seed ? 1 : 0);
 
                SET_FIELD(tdif_context->flags0,
-                         TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+                         TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
                          dif_task_params->tx_dif_conn_err_en ? 1 : 0);
-               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
                          dif_task_params->forward_guard   ? 1 : 0);
-               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
                          dif_task_params->forward_app_tag ? 1 : 0);
-               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
                          dif_task_params->forward_ref_tag ? 1 : 0);
-               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
                          dif_task_params->dif_block_size_log - 9);
-               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_HOST_INTERFACE,
                          dif_task_params->dif_on_host    ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
-                         TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+                         TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
                          dif_task_params->dif_on_network ? 1 : 0);
                val = cpu_to_le32(dif_task_params->initial_ref_tag);
                tdif_context->initial_ref_tag = val;
                tdif_context->app_tag_mask =
                             cpu_to_le16(dif_task_params->application_tag_mask);
                SET_FIELD(tdif_context->flags0,
-                         TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+                         TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
                          dif_task_params->host_guard_type);
                SET_FIELD(tdif_context->flags0,
-                         TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+                         TDIF_TASK_CONTEXT_PROTECTION_TYPE,
                          dif_task_params->protection_type);
                SET_FIELD(tdif_context->flags0,
-                         TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+                         TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
                          dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
                SET_FIELD(tdif_context->flags0,
-                         TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+                         TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
                          dif_task_params->keep_ref_tag_const ? 1 : 0);
-               SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+               SET_FIELD(tdif_context->flags1,
+                         TDIF_TASK_CONTEXT_VALIDATE_GUARD,
                          (dif_task_params->validate_guard &&
                           dif_task_params->dif_on_host) ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
-                         TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+                         TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
                          (dif_task_params->validate_app_tag &&
                          dif_task_params->dif_on_host) ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
-                         TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+                         TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
                          (dif_task_params->validate_ref_tag &&
                           dif_task_params->dif_on_host) ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
-                         TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+                         TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
                          dif_task_params->forward_app_tag_with_mask ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
-                         TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+                         TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
                          dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
                SET_FIELD(tdif_context->flags1,
-                         TDIF_TASK_CONTEXT_REFTAGMASK,
+                         TDIF_TASK_CONTEXT_REF_TAG_MASK,
                          dif_task_params->ref_tag_mask);
                SET_FIELD(tdif_context->flags0,
-                         TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+                         TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
                          dif_task_params->ignore_app_tag ? 1 : 0);
        }
 }
 
-static void set_local_completion_context(struct iscsi_task_context *context)
+static void set_local_completion_context(struct e4_iscsi_task_context *context)
 {
        SET_FIELD(context->ystorm_st_context.state.flags,
                  YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
@@ -481,7 +488,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
                              struct scsi_dif_task_params *dif_task_params)
 {
        u32 exp_data_transfer_len = conn_params->max_burst_length;
-       struct iscsi_task_context *cxt;
+       struct e4_iscsi_task_context *cxt;
        bool slow_io = false;
        u32 task_size, val;
        u8 num_sges = 0;
@@ -494,19 +501,33 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
 
        cxt = task_params->context;
 
-       val = cpu_to_le32(task_size);
-       cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
-       init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
-                                            cmd_params);
-       val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
-       cxt->mstorm_st_context.sense_db.lo = val;
 
-       val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
-       cxt->mstorm_st_context.sense_db.hi = val;
+       if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
+               set_local_completion_context(cxt);
+       } else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
+               val = cpu_to_le32(task_size +
+                          ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
+               cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
+               cxt->mstorm_st_context.expected_itt =
+                                                  cpu_to_le32(pdu_header->itt);
+       } else {
+               val = cpu_to_le32(task_size);
+               cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
+                                                                           val;
+               init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+                                                    cmd_params);
+               val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+               cxt->mstorm_st_context.sense_db.lo = val;
+
+               val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+               cxt->mstorm_st_context.sense_db.hi = val;
+       }
 
        if (task_params->tx_io_size) {
                init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
                                       dif_task_params);
+               init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
+                                      dif_task_params);
                init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
                                      &cxt->ystorm_st_context.state.data_desc,
                                      sgl_task_params);
@@ -595,7 +616,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
                                      struct scsi_sgl_task_params *tx_params,
                                      struct scsi_sgl_task_params *rx_params)
 {
-       struct iscsi_task_context *cxt;
+       struct e4_iscsi_task_context *cxt;
 
        cxt = task_params->context;
 
@@ -637,7 +658,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
                                struct scsi_sgl_task_params *tx_sgl_task_params,
                                struct scsi_sgl_task_params *rx_sgl_task_params)
 {
-       struct iscsi_task_context *cxt;
+       struct e4_iscsi_task_context *cxt;
 
        cxt = task_params->context;
 
@@ -683,7 +704,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
                                       struct scsi_sgl_task_params *tx_params,
                                       struct scsi_sgl_task_params *rx_params)
 {
-       struct iscsi_task_context *cxt;
+       struct e4_iscsi_task_context *cxt;
 
        cxt = task_params->context;
 
@@ -738,7 +759,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
                                     struct scsi_sgl_task_params *tx_params,
                                     struct scsi_sgl_task_params *rx_params)
 {
-       struct iscsi_task_context *cxt;
+       struct e4_iscsi_task_context *cxt;
 
        cxt = task_params->context;
 
index b6f24f9..c3deb77 100644 (file)
@@ -13,7 +13,7 @@
 #include "qedi_fw_scsi.h"
 
 struct iscsi_task_params {
-       struct iscsi_task_context *context;
+       struct e4_iscsi_task_context *context;
        struct iscsi_wqe          *sqe;
        u32                       tx_io_size;
        u32                       rx_io_size;
index 63d793f..f5b5a31 100644 (file)
@@ -52,11 +52,12 @@ void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
 void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
 void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
 void qedi_process_iscsi_error(struct qedi_endpoint *ep,
-                             struct async_data *data);
+                             struct iscsi_eqe_data *data);
 void qedi_start_conn_recovery(struct qedi_ctx *qedi,
                              struct qedi_conn *qedi_conn);
 struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+                           struct iscsi_eqe_data *data);
 void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
 void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
 void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
index a02b34e..7ec7f6e 100644 (file)
@@ -539,7 +539,6 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
        conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
        conn_info->dup_ack_theshold = 3;
        conn_info->rcv_wnd = 65535;
-       conn_info->cwnd = DEF_MAX_CWND;
 
        conn_info->ss_thresh = 65535;
        conn_info->srtt = 300;
@@ -557,8 +556,8 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
                                       (qedi_ep->ip_type == TCP_IPV6),
                                       1, (qedi_ep->vlan_id != 0));
 
+       conn_info->cwnd = DEF_MAX_CWND * conn_info->mss;
        conn_info->rcv_wnd_scale = 4;
-       conn_info->ts_ticks_per_second = 1000;
        conn_info->da_timeout_value = 200;
        conn_info->ack_frequency = 2;
 
@@ -1557,7 +1556,8 @@ char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
        return msg;
 }
 
-void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+                             struct iscsi_eqe_data *data)
 {
        struct qedi_conn *qedi_conn;
        struct qedi_ctx *qedi;
@@ -1603,7 +1603,8 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
                qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
 }
 
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+                           struct iscsi_eqe_data *data)
 {
        struct qedi_conn *qedi_conn;
 
index 3247287..ea13151 100644 (file)
@@ -182,7 +182,7 @@ struct qedi_cmd {
        struct scsi_cmnd *scsi_cmd;
        struct scatterlist *sg;
        struct qedi_io_bdt io_tbl;
-       struct iscsi_task_context request;
+       struct e4_iscsi_task_context request;
        unsigned char *sense_buffer;
        dma_addr_t sense_buffer_dma;
        u16 task_id;
index cccc34a..a000223 100644 (file)
@@ -60,7 +60,7 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
 {
        struct qedi_ctx *qedi;
        struct qedi_endpoint *qedi_ep;
-       struct async_data *data;
+       struct iscsi_eqe_data *data;
        int rval = 0;
 
        if (!context || !fw_handle) {
@@ -72,18 +72,18 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
 
-       data = (struct async_data *)fw_handle;
+       data = (struct iscsi_eqe_data *)fw_handle;
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
-                 "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
-                  data->cid, data->itid, data->error_code,
-                  data->fw_debug_param);
+                 "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
+                  data->icid, data->conn_id, data->error_code,
+                  data->error_pdu_opcode_reserved);
 
-       qedi_ep = qedi->ep_tbl[data->cid];
+       qedi_ep = qedi->ep_tbl[data->icid];
 
        if (!qedi_ep) {
                QEDI_WARN(&qedi->dbg_ctx,
                          "Cannot process event, ep already disconnected, cid=0x%x\n",
-                          data->cid);
+                          data->icid);
                WARN_ON(1);
                return -ENODEV;
        }
@@ -339,12 +339,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
 static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
                                  struct qed_sb_info *sb_info, u16 sb_id)
 {
-       struct status_block *sb_virt;
+       struct status_block_e4 *sb_virt;
        dma_addr_t sb_phys;
        int ret;
 
        sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
-                                    sizeof(struct status_block), &sb_phys,
+                                    sizeof(struct status_block_e4), &sb_phys,
                                     GFP_KERNEL);
        if (!sb_virt) {
                QEDI_ERR(&qedi->dbg_ctx,
@@ -858,7 +858,6 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
 
        qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
        qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
-       qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
 
 err_alloc_mem:
        return rval;
@@ -961,7 +960,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
 {
        struct qedi_ctx *qedi = fp->qedi;
        struct qed_sb_info *sb_info = fp->sb_info;
-       struct status_block *sb = sb_info->sb_virt;
+       struct status_block_e4 *sb = sb_info->sb_virt;
        struct qedi_percpu_s *p = NULL;
        struct global_queue *que;
        u16 prod_idx;
@@ -1015,7 +1014,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
        struct qedi_ctx *qedi = fp->qedi;
        struct global_queue *que;
        struct qed_sb_info *sb_info = fp->sb_info;
-       struct status_block *sb = sb_info->sb_virt;
+       struct status_block_e4 *sb = sb_info->sb_virt;
        u16 prod_idx;
 
        barrier();
@@ -1262,8 +1261,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                          "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
                          pbl, pbl->address.hi, pbl->address.lo, i);
-               pbl->opaque.hi = 0;
-               pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+               pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+               pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+               pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+               pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
                pbl++;
        }
 
index d61e3ac..8a0e523 100644 (file)
@@ -7,8 +7,8 @@
  * this source tree.
  */
 
-#define QEDI_MODULE_VERSION    "8.10.4.0"
+#define QEDI_MODULE_VERSION    "8.33.0.20"
 #define QEDI_DRIVER_MAJOR_VER          8
-#define QEDI_DRIVER_MINOR_VER          10
-#define QEDI_DRIVER_REV_VER            4
-#define QEDI_DRIVER_ENG_VER            0
+#define QEDI_DRIVER_MINOR_VER          33
+#define QEDI_DRIVER_REV_VER            0
+#define QEDI_DRIVER_ENG_VER            20
index 449ef5a..dfb8da8 100644 (file)
@@ -374,10 +374,8 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
                            model, compatible);
 
        if (strflags)
-               devinfo->flags = simple_strtoul(strflags, NULL, 0);
-       else
-               devinfo->flags = flags;
-
+               flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0);
+       devinfo->flags = flags;
        devinfo->compatible = compatible;
 
        if (compatible)
index be5e919..0880d97 100644 (file)
@@ -770,7 +770,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
  *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
-               int *bflags, int async)
+               blist_flags_t *bflags, int async)
 {
        int ret;
 
@@ -1049,14 +1049,15 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
  *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_probe_and_add_lun(struct scsi_target *starget,
-                                 u64 lun, int *bflagsp,
+                                 u64 lun, blist_flags_t *bflagsp,
                                  struct scsi_device **sdevp,
                                  enum scsi_scan_mode rescan,
                                  void *hostdata)
 {
        struct scsi_device *sdev;
        unsigned char *result;
-       int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
+       blist_flags_t bflags;
+       int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 
        /*
@@ -1201,7 +1202,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
  *     Modifies sdevscan->lun.
  **/
 static void scsi_sequential_lun_scan(struct scsi_target *starget,
-                                    int bflags, int scsi_level,
+                                    blist_flags_t bflags, int scsi_level,
                                     enum scsi_scan_mode rescan)
 {
        uint max_dev_lun;
@@ -1292,7 +1293,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
  *     0: scan completed (or no memory, so further scanning is futile)
  *     1: could not scan with REPORT LUN
  **/
-static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
                                enum scsi_scan_mode rescan)
 {
        unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -1538,7 +1539,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
                unsigned int id, u64 lun, enum scsi_scan_mode rescan)
 {
        struct Scsi_Host *shost = dev_to_shost(parent);
-       int bflags = 0;
+       blist_flags_t bflags = 0;
        int res;
        struct scsi_target *starget;
 
index 50e7d7e..a9996c1 100644 (file)
@@ -967,7 +967,8 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
 
-#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
+#define BLIST_FLAG_NAME(name)                                  \
+       [ilog2((__force unsigned int)BLIST_##name)] = #name
 static const char *const sdev_bflags_name[] = {
 #include "scsi_devinfo_tbl.c"
 };
@@ -984,7 +985,7 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
        for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
                const char *name = NULL;
 
-               if (!(sdev->sdev_bflags & BIT(i)))
+               if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i)))
                        continue;
                if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
                        name = sdev_bflags_name[i];
index d0219e3..10ebb21 100644 (file)
 
 /* Our blacklist flags */
 enum {
-       SPI_BLIST_NOIUS = 0x1,
+       SPI_BLIST_NOIUS = (__force blist_flags_t)0x1,
 };
 
 /* blacklist table, modelled on scsi_devinfo.c */
 static struct {
        char *vendor;
        char *model;
-       unsigned flags;
+       blist_flags_t flags;
 } spi_static_device_list[] __initdata = {
        {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
        {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
@@ -221,9 +221,11 @@ static int spi_device_configure(struct transport_container *tc,
 {
        struct scsi_device *sdev = to_scsi_device(dev);
        struct scsi_target *starget = sdev->sdev_target;
-       unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
-                                                     &sdev->inquiry[16],
-                                                     SCSI_DEVINFO_SPI);
+       blist_flags_t bflags;
+
+       bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
+                                            &sdev->inquiry[16],
+                                            SCSI_DEVINFO_SPI);
 
        /* Populate the target capability fields with the values
         * gleaned from the device inquiry */
index 77fe55c..d653453 100644 (file)
@@ -79,6 +79,7 @@
 #define A3700_SPI_BYTE_LEN             BIT(5)
 #define A3700_SPI_CLK_PRESCALE         BIT(0)
 #define A3700_SPI_CLK_PRESCALE_MASK    (0x1f)
+#define A3700_SPI_CLK_EVEN_OFFS                (0x10)
 
 #define A3700_SPI_WFIFO_THRS_BIT       28
 #define A3700_SPI_RFIFO_THRS_BIT       24
@@ -220,6 +221,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
 
        prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
 
+       /* For prescaler values over 15, we can only set it by steps of 2.
+        * Starting from A3700_SPI_CLK_EVEN_OFFS, we set values from 0 up to
+        * 30. We only use this range from 16 to 30.
+        */
+       if (prescale > 15)
+               prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
+
        val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
        val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
 
index f95da36..6694709 100644 (file)
@@ -1661,12 +1661,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
        pm_runtime_get_sync(&pdev->dev);
 
        /* reset the hardware and block queue progress */
-       spin_lock_irq(&as->lock);
        if (as->use_dma) {
                atmel_spi_stop_dma(master);
                atmel_spi_release_dma(master);
        }
 
+       spin_lock_irq(&as->lock);
        spi_writel(as, CR, SPI_BIT(SWRST));
        spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
        spi_readl(as, SR);
index 2ce8757..0835a8d 100644 (file)
@@ -377,8 +377,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
        /* Sets SPCMD */
        rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 
-       /* Enables SPI function in master mode */
-       rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
+       /* Sets RSPI mode */
+       rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
 
        return 0;
 }
index c5cd635..4141003 100644 (file)
@@ -525,7 +525,7 @@ err_free_master:
 
 static int sun4i_spi_remove(struct platform_device *pdev)
 {
-       pm_runtime_disable(&pdev->dev);
+       pm_runtime_force_suspend(&pdev->dev);
 
        return 0;
 }
index bc7100b..e0b9fe1 100644 (file)
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
        while (remaining_words) {
                int n_words, tx_words, rx_words;
                u32 sr;
+               int stalled;
 
                n_words = min(remaining_words, xspi->buffer_size);
 
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 
                /* Read out all the data from the Rx FIFO */
                rx_words = n_words;
+               stalled = 10;
                while (rx_words) {
+                       if (rx_words == n_words && !(stalled--) &&
+                           !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+                           (sr & XSPI_SR_RX_EMPTY_MASK)) {
+                               dev_err(&spi->dev,
+                                       "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+                               xspi_init_hw(xspi);
+                               return -EIO;
+                       }
+
                        if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
                                xilinx_spi_rx(xspi);
                                rx_words--;
index 7c69b4a..0d99b24 100644 (file)
@@ -920,7 +920,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                                        " %d i: %d bio: %p, allocating another"
                                        " bio\n", bio->bi_vcnt, i, bio);
 
-                               rc = blk_rq_append_bio(req, bio);
+                               rc = blk_rq_append_bio(req, &bio);
                                if (rc) {
                                        pr_err("pSCSI: failed to append bio\n");
                                        goto fail;
@@ -938,7 +938,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        }
 
        if (bio) {
-               rc = blk_rq_append_bio(req, bio);
+               rc = blk_rq_append_bio(req, &bio);
                if (rc) {
                        pr_err("pSCSI: failed to append bio\n");
                        goto fail;
index f77e499..065f0b6 100644 (file)
@@ -257,10 +257,25 @@ static void release_memory_resource(struct resource *resource)
        kfree(resource);
 }
 
+/*
+ * Host memory not allocated to dom0. We can use this range for hotplug-based
+ * ballooning.
+ *
+ * It's a type-less resource. Setting IORESOURCE_MEM will make resource
+ * management algorithms (arch_remove_reservations()) look into guest e820,
+ * which we don't want.
+ */
+static struct resource hostmem_resource = {
+       .name   = "Host RAM",
+};
+
+void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
+{}
+
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
-       struct resource *res;
-       int ret;
+       struct resource *res, *res_hostmem;
+       int ret = -ENOMEM;
 
        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
@@ -269,13 +284,42 @@ static struct resource *additional_memory_resource(phys_addr_t size)
        res->name = "System RAM";
        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-       ret = allocate_resource(&iomem_resource, res,
-                               size, 0, -1,
-                               PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
-       if (ret < 0) {
-               pr_err("Cannot allocate new System RAM resource\n");
-               kfree(res);
-               return NULL;
+       res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
+       if (res_hostmem) {
+               /* Try to grab a range from hostmem */
+               res_hostmem->name = "Host memory";
+               ret = allocate_resource(&hostmem_resource, res_hostmem,
+                                       size, 0, -1,
+                                       PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+       }
+
+       if (!ret) {
+               /*
+                * Insert this resource into iomem. Because hostmem_resource
+                * tracks portion of guest e820 marked as UNUSABLE noone else
+                * should try to use it.
+                */
+               res->start = res_hostmem->start;
+               res->end = res_hostmem->end;
+               ret = insert_resource(&iomem_resource, res);
+               if (ret < 0) {
+                       pr_err("Can't insert iomem_resource [%llx - %llx]\n",
+                               res->start, res->end);
+                       release_memory_resource(res_hostmem);
+                       res_hostmem = NULL;
+                       res->start = res->end = 0;
+               }
+       }
+
+       if (ret) {
+               ret = allocate_resource(&iomem_resource, res,
+                                       size, 0, -1,
+                                       PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+               if (ret < 0) {
+                       pr_err("Cannot allocate new System RAM resource\n");
+                       kfree(res);
+                       return NULL;
+               }
        }
 
 #ifdef CONFIG_SPARSEMEM
@@ -287,6 +331,7 @@ static struct resource *additional_memory_resource(phys_addr_t size)
                        pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
                               pfn, limit);
                        release_memory_resource(res);
+                       release_memory_resource(res_hostmem);
                        return NULL;
                }
        }
@@ -765,6 +810,8 @@ static int __init balloon_init(void)
        set_online_page_callback(&xen_online_page);
        register_memory_notifier(&xen_memory_nb);
        register_sysctl_table(xen_root);
+
+       arch_xen_balloon_init(&hostmem_resource);
 #endif
 
 #ifdef CONFIG_XEN_PV
index f937082..58e2fe4 100644 (file)
@@ -34,6 +34,7 @@ config CRAMFS_BLOCKDEV
 config CRAMFS_MTD
        bool "Support CramFs image directly mapped in physical memory"
        depends on CRAMFS && MTD
+       depends on CRAMFS=m || MTD=y
        default y if !CRAMFS_BLOCKDEV
        help
          This option allows the CramFs driver to load data directly from
index 156f56a..5688b5e 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1339,15 +1339,10 @@ void setup_new_exec(struct linux_binprm * bprm)
                 * avoid bad behavior from the prior rlimits. This has to
                 * happen before arch_pick_mmap_layout(), which examines
                 * RLIMIT_STACK, but after the point of no return to avoid
-                * races from other threads changing the limits. This also
-                * must be protected from races with prlimit() calls.
+                * needing to clean up the change on failure.
                 */
-               task_lock(current->group_leader);
                if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM)
                        current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM;
-               if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM)
-                       current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM;
-               task_unlock(current->group_leader);
        }
 
        arch_pick_mmap_layout(current->mm);
index 07bca11..c941251 100644 (file)
@@ -4722,6 +4722,7 @@ retry:
                                                    EXT4_INODE_EOFBLOCKS);
                }
                ext4_mark_inode_dirty(handle, inode);
+               ext4_update_inode_fsync_trans(handle, inode, 1);
                ret2 = ext4_journal_stop(handle);
                if (ret2)
                        break;
index b4267d7..b32cf26 100644 (file)
@@ -816,6 +816,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
                struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);
 
+               if (IS_ERR(p))
+                       return ERR_CAST(p);
                if (p) {
                        int acl_size = p->a_count * sizeof(ext4_acl_entry);
 
index 7df2c56..534a913 100644 (file)
@@ -149,6 +149,15 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
  */
 int ext4_inode_is_fast_symlink(struct inode *inode)
 {
+       if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
+               int ea_blocks = EXT4_I(inode)->i_file_acl ?
+                               EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
+
+               if (ext4_has_inline_data(inode))
+                       return 0;
+
+               return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
+       }
        return S_ISLNK(inode->i_mode) && inode->i_size &&
               (inode->i_size < EXT4_N_BLOCKS * 4);
 }
index 798b3ac..e750d68 100644 (file)
@@ -1399,6 +1399,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
                               "falling back\n"));
        }
        nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+       if (!nblocks) {
+               ret = NULL;
+               goto cleanup_and_exit;
+       }
        start = EXT4_I(dir)->i_dir_start_lookup;
        if (start >= nblocks)
                start = 0;
index e158ec6..9d1374a 100644 (file)
@@ -2826,6 +2826,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
                            SB_DIRSYNC |
                            SB_SILENT |
                            SB_POSIXACL |
+                           SB_LAZYTIME |
                            SB_I_VERSION);
 
        if (flags & MS_REMOUNT)
index 7c6f76d..36b0772 100644 (file)
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -103,14 +103,14 @@ slow:
        goto got_it;
 }
 
-void *ns_get_path(struct path *path, struct task_struct *task,
-                       const struct proc_ns_operations *ns_ops)
+void *ns_get_path_cb(struct path *path, ns_get_path_helper_t *ns_get_cb,
+                    void *private_data)
 {
        struct ns_common *ns;
        void *ret;
 
 again:
-       ns = ns_ops->get(task);
+       ns = ns_get_cb(private_data);
        if (!ns)
                return ERR_PTR(-ENOENT);
 
@@ -120,6 +120,29 @@ again:
        return ret;
 }
 
+struct ns_get_path_task_args {
+       const struct proc_ns_operations *ns_ops;
+       struct task_struct *task;
+};
+
+static struct ns_common *ns_get_path_task(void *private_data)
+{
+       struct ns_get_path_task_args *args = private_data;
+
+       return args->ns_ops->get(args->task);
+}
+
+void *ns_get_path(struct path *path, struct task_struct *task,
+                 const struct proc_ns_operations *ns_ops)
+{
+       struct ns_get_path_task_args args = {
+               .ns_ops = ns_ops,
+               .task   = task,
+       };
+
+       return ns_get_path_cb(path, ns_get_path_task, &args);
+}
+
 int open_related_ns(struct ns_common *ns,
                   struct ns_common *(*get_ns)(struct ns_common *ns))
 {
index d4e33e8..7ff1349 100644 (file)
@@ -191,6 +191,24 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
 
        INIT_LIST_HEAD(&s->s_mounts);
        s->s_user_ns = get_user_ns(user_ns);
+       init_rwsem(&s->s_umount);
+       lockdep_set_class(&s->s_umount, &type->s_umount_key);
+       /*
+        * sget() can have s_umount recursion.
+        *
+        * When it cannot find a suitable sb, it allocates a new
+        * one (this one), and tries again to find a suitable old
+        * one.
+        *
+        * In case that succeeds, it will acquire the s_umount
+        * lock of the old one. Since these are clearly distrinct
+        * locks, and this object isn't exposed yet, there's no
+        * risk of deadlocks.
+        *
+        * Annotate this by putting this lock in a different
+        * subclass.
+        */
+       down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
 
        if (security_sb_alloc(s))
                goto fail;
@@ -218,25 +236,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
                goto fail;
        if (list_lru_init_memcg(&s->s_inode_lru))
                goto fail;
-
-       init_rwsem(&s->s_umount);
-       lockdep_set_class(&s->s_umount, &type->s_umount_key);
-       /*
-        * sget() can have s_umount recursion.
-        *
-        * When it cannot find a suitable sb, it allocates a new
-        * one (this one), and tries again to find a suitable old
-        * one.
-        *
-        * In case that succeeds, it will acquire the s_umount
-        * lock of the old one. Since these are clearly distrinct
-        * locks, and this object isn't exposed yet, there's no
-        * risk of deadlocks.
-        *
-        * Annotate this by putting this lock in a different
-        * subclass.
-        */
-       down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
        s->s_count = 1;
        atomic_set(&s->s_active, 1);
        mutex_init(&s->s_vfs_rename_mutex);
index 0da8001..83ed771 100644 (file)
@@ -702,7 +702,7 @@ xfs_alloc_ag_vextent(
        ASSERT(args->agbno % args->alignment == 0);
 
        /* if not file data, insert new block into the reverse map btree */
-       if (args->oinfo.oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+       if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
                error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
                                       args->agbno, args->len, &args->oinfo);
                if (error)
@@ -1682,7 +1682,7 @@ xfs_free_ag_extent(
        bno_cur = cnt_cur = NULL;
        mp = tp->t_mountp;
 
-       if (oinfo->oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+       if (!xfs_rmap_should_skip_owner_update(oinfo)) {
                error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
                if (error)
                        goto error0;
index 6249c92..a76914d 100644 (file)
@@ -212,6 +212,7 @@ xfs_attr_set(
        int                     flags)
 {
        struct xfs_mount        *mp = dp->i_mount;
+       struct xfs_buf          *leaf_bp = NULL;
        struct xfs_da_args      args;
        struct xfs_defer_ops    dfops;
        struct xfs_trans_res    tres;
@@ -327,9 +328,16 @@ xfs_attr_set(
                 * GROT: another possible req'mt for a double-split btree op.
                 */
                xfs_defer_init(args.dfops, args.firstblock);
-               error = xfs_attr_shortform_to_leaf(&args);
+               error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
                if (error)
                        goto out_defer_cancel;
+               /*
+                * Prevent the leaf buffer from being unlocked so that a
+                * concurrent AIL push cannot grab the half-baked leaf
+                * buffer and run into problems with the write verifier.
+                */
+               xfs_trans_bhold(args.trans, leaf_bp);
+               xfs_defer_bjoin(args.dfops, leaf_bp);
                xfs_defer_ijoin(args.dfops, dp);
                error = xfs_defer_finish(&args.trans, args.dfops);
                if (error)
@@ -337,13 +345,14 @@ xfs_attr_set(
 
                /*
                 * Commit the leaf transformation.  We'll need another (linked)
-                * transaction to add the new attribute to the leaf.
+                * transaction to add the new attribute to the leaf, which
+                * means that we have to hold & join the leaf buffer here too.
                 */
-
                error = xfs_trans_roll_inode(&args.trans, dp);
                if (error)
                        goto out;
-
+               xfs_trans_bjoin(args.trans, leaf_bp);
+               leaf_bp = NULL;
        }
 
        if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
@@ -374,8 +383,9 @@ xfs_attr_set(
 
 out_defer_cancel:
        xfs_defer_cancel(&dfops);
-       args.trans = NULL;
 out:
+       if (leaf_bp)
+               xfs_trans_brelse(args.trans, leaf_bp);
        if (args.trans)
                xfs_trans_cancel(args.trans);
        xfs_iunlock(dp, XFS_ILOCK_EXCL);
index 53cc8b9..601eaa3 100644 (file)
@@ -735,10 +735,13 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
 }
 
 /*
- * Convert from using the shortform to the leaf.
+ * Convert from using the shortform to the leaf.  On success, return the
+ * buffer so that we can keep it locked until we're totally done with it.
  */
 int
-xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
+xfs_attr_shortform_to_leaf(
+       struct xfs_da_args      *args,
+       struct xfs_buf          **leaf_bp)
 {
        xfs_inode_t *dp;
        xfs_attr_shortform_t *sf;
@@ -818,7 +821,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
                sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
        }
        error = 0;
-
+       *leaf_bp = bp;
 out:
        kmem_free(tmpbuffer);
        return error;
index f7dda0c..894124e 100644 (file)
@@ -48,7 +48,8 @@ void  xfs_attr_shortform_create(struct xfs_da_args *args);
 void   xfs_attr_shortform_add(struct xfs_da_args *args, int forkoff);
 int    xfs_attr_shortform_lookup(struct xfs_da_args *args);
 int    xfs_attr_shortform_getvalue(struct xfs_da_args *args);
-int    xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
+int    xfs_attr_shortform_to_leaf(struct xfs_da_args *args,
+                       struct xfs_buf **leaf_bp);
 int    xfs_attr_shortform_remove(struct xfs_da_args *args);
 int    xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
 int    xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes);
index 1210f68..1bddbba 100644 (file)
@@ -5136,7 +5136,7 @@ __xfs_bunmapi(
         * blowing out the transaction with a mix of EFIs and reflink
         * adjustments.
         */
-       if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
+       if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
                max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
        else
                max_len = len;
index 072ebfe..087fea0 100644 (file)
@@ -249,6 +249,10 @@ xfs_defer_trans_roll(
        for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
                xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);
 
+       /* Hold the (previously bjoin'd) buffer locked across the roll. */
+       for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
+               xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);
+
        trace_xfs_defer_trans_roll((*tp)->t_mountp, dop);
 
        /* Roll the transaction. */
@@ -264,6 +268,12 @@ xfs_defer_trans_roll(
        for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
                xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);
 
+       /* Rejoin the buffers and dirty them so the log moves forward. */
+       for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
+               xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
+               xfs_trans_bhold(*tp, dop->dop_bufs[i]);
+       }
+
        return error;
 }
 
@@ -295,6 +305,31 @@ xfs_defer_ijoin(
                }
        }
 
+       ASSERT(0);
+       return -EFSCORRUPTED;
+}
+
+/*
+ * Add this buffer to the deferred op.  Each joined buffer is relogged
+ * each time we roll the transaction.
+ */
+int
+xfs_defer_bjoin(
+       struct xfs_defer_ops            *dop,
+       struct xfs_buf                  *bp)
+{
+       int                             i;
+
+       for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
+               if (dop->dop_bufs[i] == bp)
+                       return 0;
+               else if (dop->dop_bufs[i] == NULL) {
+                       dop->dop_bufs[i] = bp;
+                       return 0;
+               }
+       }
+
+       ASSERT(0);
        return -EFSCORRUPTED;
 }
 
@@ -493,9 +528,7 @@ xfs_defer_init(
        struct xfs_defer_ops            *dop,
        xfs_fsblock_t                   *fbp)
 {
-       dop->dop_committed = false;
-       dop->dop_low = false;
-       memset(&dop->dop_inodes, 0, sizeof(dop->dop_inodes));
+       memset(dop, 0, sizeof(struct xfs_defer_ops));
        *fbp = NULLFSBLOCK;
        INIT_LIST_HEAD(&dop->dop_intake);
        INIT_LIST_HEAD(&dop->dop_pending);
index d4f046d..045beac 100644 (file)
@@ -59,6 +59,7 @@ enum xfs_defer_ops_type {
 };
 
 #define XFS_DEFER_OPS_NR_INODES        2       /* join up to two inodes */
+#define XFS_DEFER_OPS_NR_BUFS  2       /* join up to two buffers */
 
 struct xfs_defer_ops {
        bool                    dop_committed;  /* did any trans commit? */
@@ -66,8 +67,9 @@ struct xfs_defer_ops {
        struct list_head        dop_intake;     /* unlogged pending work */
        struct list_head        dop_pending;    /* logged pending work */
 
-       /* relog these inodes with each roll */
+       /* relog these with each roll */
        struct xfs_inode        *dop_inodes[XFS_DEFER_OPS_NR_INODES];
+       struct xfs_buf          *dop_bufs[XFS_DEFER_OPS_NR_BUFS];
 };
 
 void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
@@ -77,6 +79,7 @@ void xfs_defer_cancel(struct xfs_defer_ops *dop);
 void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
 bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
 int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
+int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
 
 /* Description of a deferred type. */
 struct xfs_defer_op_type {
index 89bf16b..b0f3179 100644 (file)
@@ -632,8 +632,6 @@ xfs_iext_insert(
        struct xfs_iext_leaf    *new = NULL;
        int                     nr_entries, i;
 
-       trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
-
        if (ifp->if_height == 0)
                xfs_iext_alloc_root(ifp, cur);
        else if (ifp->if_height == 1)
@@ -661,6 +659,8 @@ xfs_iext_insert(
        xfs_iext_set(cur_rec(cur), irec);
        ifp->if_bytes += sizeof(struct xfs_iext_rec);
 
+       trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
+
        if (new)
                xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2);
 }
index 585b35d..c40d267 100644 (file)
@@ -1488,27 +1488,12 @@ __xfs_refcount_cow_alloc(
        xfs_extlen_t            aglen,
        struct xfs_defer_ops    *dfops)
 {
-       int                     error;
-
        trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
                        agbno, aglen);
 
        /* Add refcount btree reservation */
-       error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
+       return xfs_refcount_adjust_cow(rcur, agbno, aglen,
                        XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops);
-       if (error)
-               return error;
-
-       /* Add rmap entry */
-       if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
-               error = xfs_rmap_alloc_extent(rcur->bc_mp, dfops,
-                               rcur->bc_private.a.agno,
-                               agbno, aglen, XFS_RMAP_OWN_COW);
-               if (error)
-                       return error;
-       }
-
-       return error;
 }
 
 /*
@@ -1521,27 +1506,12 @@ __xfs_refcount_cow_free(
        xfs_extlen_t            aglen,
        struct xfs_defer_ops    *dfops)
 {
-       int                     error;
-
        trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
                        agbno, aglen);
 
        /* Remove refcount btree reservation */
-       error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
+       return xfs_refcount_adjust_cow(rcur, agbno, aglen,
                        XFS_REFCOUNT_ADJUST_COW_FREE, dfops);
-       if (error)
-               return error;
-
-       /* Remove rmap entry */
-       if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
-               error = xfs_rmap_free_extent(rcur->bc_mp, dfops,
-                               rcur->bc_private.a.agno,
-                               agbno, aglen, XFS_RMAP_OWN_COW);
-               if (error)
-                       return error;
-       }
-
-       return error;
 }
 
 /* Record a CoW staging extent in the refcount btree. */
@@ -1552,11 +1522,19 @@ xfs_refcount_alloc_cow_extent(
        xfs_fsblock_t                   fsb,
        xfs_extlen_t                    len)
 {
+       int                             error;
+
        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return 0;
 
-       return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
+       error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
                        fsb, len);
+       if (error)
+               return error;
+
+       /* Add rmap entry */
+       return xfs_rmap_alloc_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+                       XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
 }
 
 /* Forget a CoW staging event in the refcount btree. */
@@ -1567,9 +1545,17 @@ xfs_refcount_free_cow_extent(
        xfs_fsblock_t                   fsb,
        xfs_extlen_t                    len)
 {
+       int                             error;
+
        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return 0;
 
+       /* Remove rmap entry */
+       error = xfs_rmap_free_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+                       XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
+       if (error)
+               return error;
+
        return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
                        fsb, len);
 }
index dd019ce..50db920 100644 (file)
@@ -368,6 +368,51 @@ xfs_rmap_lookup_le_range(
 }
 
 /*
+ * Perform all the relevant owner checks for a removal op.  If we're doing an
+ * unknown-owner removal then we have no owner information to check.
+ */
+static int
+xfs_rmap_free_check_owner(
+       struct xfs_mount        *mp,
+       uint64_t                ltoff,
+       struct xfs_rmap_irec    *rec,
+       xfs_fsblock_t           bno,
+       xfs_filblks_t           len,
+       uint64_t                owner,
+       uint64_t                offset,
+       unsigned int            flags)
+{
+       int                     error = 0;
+
+       if (owner == XFS_RMAP_OWN_UNKNOWN)
+               return 0;
+
+       /* Make sure the unwritten flag matches. */
+       XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
+                       (rec->rm_flags & XFS_RMAP_UNWRITTEN), out);
+
+       /* Make sure the owner matches what we expect to find in the tree. */
+       XFS_WANT_CORRUPTED_GOTO(mp, owner == rec->rm_owner, out);
+
+       /* Check the offset, if necessary. */
+       if (XFS_RMAP_NON_INODE_OWNER(owner))
+               goto out;
+
+       if (flags & XFS_RMAP_BMBT_BLOCK) {
+               XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_flags & XFS_RMAP_BMBT_BLOCK,
+                               out);
+       } else {
+               XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_offset <= offset, out);
+               XFS_WANT_CORRUPTED_GOTO(mp,
+                               ltoff + rec->rm_blockcount >= offset + len,
+                               out);
+       }
+
+out:
+       return error;
+}
+
+/*
  * Find the extent in the rmap btree and remove it.
  *
  * The record we find should always be an exact match for the extent that we're
@@ -444,33 +489,40 @@ xfs_rmap_unmap(
                goto out_done;
        }
 
-       /* Make sure the unwritten flag matches. */
-       XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
-                       (ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error);
+       /*
+        * If we're doing an unknown-owner removal for EFI recovery, we expect
+        * to find the full range in the rmapbt or nothing at all.  If we
+        * don't find any rmaps overlapping either end of the range, we're
+        * done.  Hopefully this means that the EFI creator already queued
+        * (and finished) a RUI to remove the rmap.
+        */
+       if (owner == XFS_RMAP_OWN_UNKNOWN &&
+           ltrec.rm_startblock + ltrec.rm_blockcount <= bno) {
+               struct xfs_rmap_irec    rtrec;
+
+               error = xfs_btree_increment(cur, 0, &i);
+               if (error)
+                       goto out_error;
+               if (i == 0)
+                       goto out_done;
+               error = xfs_rmap_get_rec(cur, &rtrec, &i);
+               if (error)
+                       goto out_error;
+               XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+               if (rtrec.rm_startblock >= bno + len)
+                       goto out_done;
+       }
 
        /* Make sure the extent we found covers the entire freeing range. */
        XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
-               ltrec.rm_startblock + ltrec.rm_blockcount >=
-               bno + len, out_error);
+                       ltrec.rm_startblock + ltrec.rm_blockcount >=
+                       bno + len, out_error);
 
-       /* Make sure the owner matches what we expect to find in the tree. */
-       XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner ||
-                                   XFS_RMAP_NON_INODE_OWNER(owner), out_error);
-
-       /* Check the offset, if necessary. */
-       if (!XFS_RMAP_NON_INODE_OWNER(owner)) {
-               if (flags & XFS_RMAP_BMBT_BLOCK) {
-                       XFS_WANT_CORRUPTED_GOTO(mp,
-                                       ltrec.rm_flags & XFS_RMAP_BMBT_BLOCK,
-                                       out_error);
-               } else {
-                       XFS_WANT_CORRUPTED_GOTO(mp,
-                                       ltrec.rm_offset <= offset, out_error);
-                       XFS_WANT_CORRUPTED_GOTO(mp,
-                                       ltoff + ltrec.rm_blockcount >= offset + len,
-                                       out_error);
-               }
-       }
+       /* Check owner information. */
+       error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, bno, len, owner,
+                       offset, flags);
+       if (error)
+               goto out_error;
 
        if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
                /* exact match, simply remove the record from rmap tree */
@@ -664,6 +716,7 @@ xfs_rmap_map(
                flags |= XFS_RMAP_UNWRITTEN;
        trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
                        unwritten, oinfo);
+       ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
 
        /*
         * For the initial lookup, look for an exact match or the left-adjacent
index 466ede6..0fcd5b1 100644 (file)
@@ -61,7 +61,21 @@ static inline void
 xfs_rmap_skip_owner_update(
        struct xfs_owner_info   *oi)
 {
-       oi->oi_owner = XFS_RMAP_OWN_UNKNOWN;
+       xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_NULL);
+}
+
+static inline bool
+xfs_rmap_should_skip_owner_update(
+       struct xfs_owner_info   *oi)
+{
+       return oi->oi_owner == XFS_RMAP_OWN_NULL;
+}
+
+static inline void
+xfs_rmap_any_owner_update(
+       struct xfs_owner_info   *oi)
+{
+       xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_UNKNOWN);
 }
 
 /* Reverse mapping functions. */
index 44f8c54..64da906 100644 (file)
@@ -538,7 +538,7 @@ xfs_efi_recover(
                return error;
        efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
 
-       xfs_rmap_skip_owner_update(&oinfo);
+       xfs_rmap_any_owner_update(&oinfo);
        for (i = 0; i < efip->efi_format.efi_nextents; i++) {
                extp = &efip->efi_format.efi_extents[i];
                error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
index 8f22fc5..60a2e12 100644 (file)
@@ -571,6 +571,11 @@ xfs_growfs_data_private(
                 * this doesn't actually exist in the rmap btree.
                 */
                xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
+               error = xfs_rmap_free(tp, bp, agno,
+                               be32_to_cpu(agf->agf_length) - new,
+                               new, &oinfo);
+               if (error)
+                       goto error0;
                error = xfs_free_extent(tp,
                                XFS_AGB_TO_FSB(mp, agno,
                                        be32_to_cpu(agf->agf_length) - new),
index 43005fb..3861d61 100644 (file)
@@ -870,7 +870,7 @@ xfs_eofblocks_worker(
  * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
  * (We'll just piggyback on the post-EOF prealloc space workqueue.)
  */
-STATIC void
+void
 xfs_queue_cowblocks(
        struct xfs_mount *mp)
 {
@@ -1536,8 +1536,23 @@ xfs_inode_free_quota_eofblocks(
        return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
 }
 
+static inline unsigned long
+xfs_iflag_for_tag(
+       int             tag)
+{
+       switch (tag) {
+       case XFS_ICI_EOFBLOCKS_TAG:
+               return XFS_IEOFBLOCKS;
+       case XFS_ICI_COWBLOCKS_TAG:
+               return XFS_ICOWBLOCKS;
+       default:
+               ASSERT(0);
+               return 0;
+       }
+}
+
 static void
-__xfs_inode_set_eofblocks_tag(
+__xfs_inode_set_blocks_tag(
        xfs_inode_t     *ip,
        void            (*execute)(struct xfs_mount *mp),
        void            (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
@@ -1552,10 +1567,10 @@ __xfs_inode_set_eofblocks_tag(
         * Don't bother locking the AG and looking up in the radix trees
         * if we already know that we have the tag set.
         */
-       if (ip->i_flags & XFS_IEOFBLOCKS)
+       if (ip->i_flags & xfs_iflag_for_tag(tag))
                return;
        spin_lock(&ip->i_flags_lock);
-       ip->i_flags |= XFS_IEOFBLOCKS;
+       ip->i_flags |= xfs_iflag_for_tag(tag);
        spin_unlock(&ip->i_flags_lock);
 
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -1587,13 +1602,13 @@ xfs_inode_set_eofblocks_tag(
        xfs_inode_t     *ip)
 {
        trace_xfs_inode_set_eofblocks_tag(ip);
-       return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks,
+       return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
                        trace_xfs_perag_set_eofblocks,
                        XFS_ICI_EOFBLOCKS_TAG);
 }
 
 static void
-__xfs_inode_clear_eofblocks_tag(
+__xfs_inode_clear_blocks_tag(
        xfs_inode_t     *ip,
        void            (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
                                    int error, unsigned long caller_ip),
@@ -1603,7 +1618,7 @@ __xfs_inode_clear_eofblocks_tag(
        struct xfs_perag *pag;
 
        spin_lock(&ip->i_flags_lock);
-       ip->i_flags &= ~XFS_IEOFBLOCKS;
+       ip->i_flags &= ~xfs_iflag_for_tag(tag);
        spin_unlock(&ip->i_flags_lock);
 
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -1630,7 +1645,7 @@ xfs_inode_clear_eofblocks_tag(
        xfs_inode_t     *ip)
 {
        trace_xfs_inode_clear_eofblocks_tag(ip);
-       return __xfs_inode_clear_eofblocks_tag(ip,
+       return __xfs_inode_clear_blocks_tag(ip,
                        trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
 }
 
@@ -1724,7 +1739,7 @@ xfs_inode_set_cowblocks_tag(
        xfs_inode_t     *ip)
 {
        trace_xfs_inode_set_cowblocks_tag(ip);
-       return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
+       return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
                        trace_xfs_perag_set_cowblocks,
                        XFS_ICI_COWBLOCKS_TAG);
 }
@@ -1734,6 +1749,6 @@ xfs_inode_clear_cowblocks_tag(
        xfs_inode_t     *ip)
 {
        trace_xfs_inode_clear_cowblocks_tag(ip);
-       return __xfs_inode_clear_eofblocks_tag(ip,
+       return __xfs_inode_clear_blocks_tag(ip,
                        trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
 }
index bff4d85..d4a7758 100644 (file)
@@ -81,6 +81,7 @@ void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);
 int xfs_icache_free_cowblocks(struct xfs_mount *, struct xfs_eofblocks *);
 int xfs_inode_free_quota_cowblocks(struct xfs_inode *ip);
 void xfs_cowblocks_worker(struct work_struct *);
+void xfs_queue_cowblocks(struct xfs_mount *);
 
 int xfs_inode_ag_iterator(struct xfs_mount *mp,
        int (*execute)(struct xfs_inode *ip, int flags, void *args),
index b41952a..6f95bdb 100644 (file)
@@ -1487,6 +1487,24 @@ xfs_link(
        return error;
 }
 
+/* Clear the reflink flag and the cowblocks tag if possible. */
+static void
+xfs_itruncate_clear_reflink_flags(
+       struct xfs_inode        *ip)
+{
+       struct xfs_ifork        *dfork;
+       struct xfs_ifork        *cfork;
+
+       if (!xfs_is_reflink_inode(ip))
+               return;
+       dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+       cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+       if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
+               ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+       if (cfork->if_bytes == 0)
+               xfs_inode_clear_cowblocks_tag(ip);
+}
+
 /*
  * Free up the underlying blocks past new_size.  The new size must be smaller
  * than the current size.  This routine can be used both for the attribute and
@@ -1583,15 +1601,7 @@ xfs_itruncate_extents(
        if (error)
                goto out;
 
-       /*
-        * Clear the reflink flag if there are no data fork blocks and
-        * there are no extents staged in the cow fork.
-        */
-       if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
-               if (ip->i_d.di_nblocks == 0)
-                       ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
-               xfs_inode_clear_cowblocks_tag(ip);
-       }
+       xfs_itruncate_clear_reflink_flags(ip);
 
        /*
         * Always re-log the inode so that our permanent transaction can keep
index b2136af..d383e39 100644 (file)
@@ -232,6 +232,7 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
  * log recovery to replay a bmap operation on the inode.
  */
 #define XFS_IRECOVERY          (1 << 11)
+#define XFS_ICOWBLOCKS         (1 << 12)/* has the cowblocks tag set */
 
 /*
  * Per-lifetime flags need to be reset when re-using a reclaimable inode during
index cf7c8f8..47aea2e 100644 (file)
@@ -454,6 +454,8 @@ retry:
        if (error)
                goto out_bmap_cancel;
 
+       xfs_inode_set_cowblocks_tag(ip);
+
        /* Finish up. */
        error = xfs_defer_finish(&tp, &dfops);
        if (error)
@@ -490,8 +492,9 @@ xfs_reflink_find_cow_mapping(
        struct xfs_iext_cursor          icur;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
-       ASSERT(xfs_is_reflink_inode(ip));
 
+       if (!xfs_is_reflink_inode(ip))
+               return false;
        offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
        if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
                return false;
@@ -610,6 +613,9 @@ xfs_reflink_cancel_cow_blocks(
 
                        /* Remove the mapping from the CoW fork. */
                        xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
+               } else {
+                       /* Didn't do anything, push cursor back. */
+                       xfs_iext_prev(ifp, &icur);
                }
 next_extent:
                if (!xfs_iext_get_extent(ifp, &icur, &got))
@@ -725,7 +731,7 @@ xfs_reflink_end_cow(
                        (unsigned int)(end_fsb - offset_fsb),
                        XFS_DATA_FORK);
        error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
-                       resblks, 0, 0, &tp);
+                       resblks, 0, XFS_TRANS_RESERVE, &tp);
        if (error)
                goto out;
 
@@ -1291,6 +1297,17 @@ xfs_reflink_remap_range(
 
        trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
 
+       /*
+        * Clear out post-eof preallocations because we don't have page cache
+        * backing the delayed allocations and they'll never get freed on
+        * their own.
+        */
+       if (xfs_can_free_eofblocks(dest, true)) {
+               ret = xfs_free_eofblocks(dest);
+               if (ret)
+                       goto out_unlock;
+       }
+
        /* Set flags and remap blocks. */
        ret = xfs_reflink_set_inode_flag(src, dest);
        if (ret)
index 5122d30..1dacccc 100644 (file)
@@ -1360,6 +1360,7 @@ xfs_fs_remount(
                        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                        return error;
                }
+               xfs_queue_cowblocks(mp);
 
                /* Create the per-AG metadata reservation pool .*/
                error = xfs_fs_reserve_ag_blocks(mp);
@@ -1369,6 +1370,14 @@ xfs_fs_remount(
 
        /* rw -> ro */
        if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
+               /* Get rid of any leftover CoW reservations... */
+               cancel_delayed_work_sync(&mp->m_cowblocks_work);
+               error = xfs_icache_free_cowblocks(mp, NULL);
+               if (error) {
+                       xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+                       return error;
+               }
+
                /* Free the per-AG metadata reservation pool. */
                error = xfs_fs_unreserve_ag_blocks(mp);
                if (error) {
index ea189d8..8ac4e68 100644 (file)
@@ -7,9 +7,10 @@
 #ifndef _ASM_GENERIC_MM_HOOKS_H
 #define _ASM_GENERIC_MM_HOOKS_H
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-                                struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+                               struct mm_struct *mm)
 {
+       return 0;
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)
index b234d54..868e685 100644 (file)
@@ -1025,6 +1025,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
 struct file;
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                        unsigned long size, pgprot_t *vma_prot);
+
+#ifndef CONFIG_X86_ESPFIX64
+static inline void init_espfix_bsp(void) { }
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #ifndef io_remap_pfn_range
index cceafa0..b67404f 100644 (file)
@@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
 
 struct mcryptd_cpu_queue {
        struct crypto_queue queue;
+       spinlock_t q_lock;
        struct work_struct work;
 };
 
index 6e45608..9da6ce2 100644 (file)
@@ -62,7 +62,7 @@ struct arch_timer_cpu {
        bool                    enabled;
 };
 
-int kvm_timer_hyp_init(void);
+int kvm_timer_hyp_init(bool);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
index 82f0c8f..23d29b3 100644 (file)
@@ -492,6 +492,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 #define bio_set_dev(bio, bdev)                         \
 do {                                           \
+       if ((bio)->bi_disk != (bdev)->bd_disk)  \
+               bio_clear_flag(bio, BIO_THROTTLED);\
        (bio)->bi_disk = (bdev)->bd_disk;       \
        (bio)->bi_partno = (bdev)->bd_partno;   \
 } while (0)
index a1e628e..9e7d8bd 100644 (file)
@@ -50,8 +50,6 @@ struct blk_issue_stat {
 struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct gendisk          *bi_disk;
-       u8                      bi_partno;
-       blk_status_t            bi_status;
        unsigned int            bi_opf;         /* bottom bits req flags,
                                                 * top bits REQ_OP. Use
                                                 * accessors.
@@ -59,8 +57,8 @@ struct bio {
        unsigned short          bi_flags;       /* status, etc and bvec pool number */
        unsigned short          bi_ioprio;
        unsigned short          bi_write_hint;
-
-       struct bvec_iter        bi_iter;
+       blk_status_t            bi_status;
+       u8                      bi_partno;
 
        /* Number of segments in this BIO after
         * physical address coalescing is performed.
@@ -74,8 +72,9 @@ struct bio {
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;
 
-       atomic_t                __bi_remaining;
+       struct bvec_iter        bi_iter;
 
+       atomic_t                __bi_remaining;
        bio_end_io_t            *bi_end_io;
 
        void                    *bi_private;
index 8089ca1..0ce8a37 100644 (file)
@@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t;
 struct request {
        struct list_head queuelist;
        union {
-               call_single_data_t csd;
+               struct __call_single_data csd;
                u64 fifo_time;
        };
 
@@ -241,14 +241,24 @@ struct request {
        struct request *next_rq;
 };
 
+static inline bool blk_op_is_scsi(unsigned int op)
+{
+       return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
+}
+
+static inline bool blk_op_is_private(unsigned int op)
+{
+       return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
+}
+
 static inline bool blk_rq_is_scsi(struct request *rq)
 {
-       return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
+       return blk_op_is_scsi(req_op(rq));
 }
 
 static inline bool blk_rq_is_private(struct request *rq)
 {
-       return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
+       return blk_op_is_private(req_op(rq));
 }
 
 static inline bool blk_rq_is_passthrough(struct request *rq)
@@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
        return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
 }
 
+static inline bool bio_is_passthrough(struct bio *bio)
+{
+       unsigned op = bio_op(bio);
+
+       return blk_op_is_scsi(op) || blk_op_is_private(op);
+}
+
 static inline unsigned short req_get_ioprio(struct request *req)
 {
        return req->ioprio;
@@ -948,7 +965,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
-extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
+extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_queue_split(struct request_queue *, struct bio **);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
index da54ef6..9e03046 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/numa.h>
 #include <linux/wait.h>
 
+struct bpf_verifier_env;
 struct perf_event;
 struct bpf_prog;
 struct bpf_map;
@@ -184,14 +185,18 @@ struct bpf_verifier_ops {
                                  struct bpf_prog *prog, u32 *target_size);
 };
 
+struct bpf_prog_offload_ops {
+       int (*insn_hook)(struct bpf_verifier_env *env,
+                        int insn_idx, int prev_insn_idx);
+};
+
 struct bpf_dev_offload {
        struct bpf_prog         *prog;
        struct net_device       *netdev;
        void                    *dev_priv;
        struct list_head        offloads;
        bool                    dev_state;
-       bool                    verifier_running;
-       wait_queue_head_t       verifier_done;
+       const struct bpf_prog_offload_ops *dev_ops;
 };
 
 struct bpf_prog_aux {
@@ -201,6 +206,7 @@ struct bpf_prog_aux {
        u32 stack_depth;
        u32 id;
        u32 func_cnt;
+       bool offload_requested;
        struct bpf_prog **func;
        void *jit_data; /* JIT specific data. arch dependent */
        struct latch_tree_node ksym_tnode;
@@ -351,6 +357,8 @@ void bpf_prog_put(struct bpf_prog *prog);
 int __bpf_prog_charge(struct user_struct *user, u32 pages);
 void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
 
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
 struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
@@ -523,13 +531,15 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
 
 int bpf_prog_offload_compile(struct bpf_prog *prog);
 void bpf_prog_offload_destroy(struct bpf_prog *prog);
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+                              struct bpf_prog *prog);
 
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
 
 static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
 {
-       return aux->offload;
+       return aux->offload_requested;
 }
 #else
 static inline int bpf_prog_offload_init(struct bpf_prog *prog,
@@ -544,7 +554,7 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
 }
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
-#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
 #else
index 978c1d9..19b8349 100644 (file)
@@ -42,7 +42,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
 BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
 #ifdef CONFIG_NET
 BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
-#ifdef CONFIG_STREAM_PARSER
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET)
 BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
 #endif
 BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
index aaac589..2feb218 100644 (file)
  * In practice this is far bigger than any realistic pointer offset; this limit
  * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
  */
-#define BPF_MAX_VAR_OFF        (1ULL << 31)
+#define BPF_MAX_VAR_OFF        (1 << 29)
 /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
  * that converting umax_value to int cannot overflow.
  */
-#define BPF_MAX_VAR_SIZ        INT_MAX
+#define BPF_MAX_VAR_SIZ        (1 << 29)
 
 /* Liveness marks, used for registers and spilled-regs (in stack slots).
  * Read marks propagate upwards until they find a write mark; they record that
@@ -166,12 +166,6 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log)
        return log->len_used >= log->len_total - 1;
 }
 
-struct bpf_verifier_env;
-struct bpf_ext_analyzer_ops {
-       int (*insn_hook)(struct bpf_verifier_env *env,
-                        int insn_idx, int prev_insn_idx);
-};
-
 #define BPF_MAX_SUBPROGS 256
 
 /* single container for all structs
@@ -185,7 +179,6 @@ struct bpf_verifier_env {
        bool strict_alignment;          /* perform strict pointer alignment checks */
        struct bpf_verifier_state *cur_state; /* current verifier state */
        struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
-       const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
        struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
        u32 used_map_cnt;               /* number of used maps */
        u32 id_gen;                     /* used to generate unique reg IDs */
@@ -194,6 +187,7 @@ struct bpf_verifier_env {
        struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
        struct bpf_verifer_log log;
        u32 subprog_starts[BPF_MAX_SUBPROGS];
+       /* computes the stack depth of each bpf function */
        u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
        u32 subprog_cnt;
 };
@@ -205,13 +199,8 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
        return cur->frame[cur->curframe]->regs;
 }
 
-#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
-#else
-static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
-{
-       return -EOPNOTSUPP;
-}
-#endif
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+                                int insn_idx, int prev_insn_idx);
 
 #endif /* _LINUX_BPF_VERIFIER_H */
index b6514c2..b4f2211 100644 (file)
@@ -23,7 +23,7 @@ struct lan9303 {
        struct regmap_irq_chip_data *irq_data;
        struct gpio_desc *reset_gpio;
        u32 reset_duration; /* in [ms] */
-       bool phy_addr_sel_strap;
+       int phy_addr_base;
        struct dsa_switch *ds;
        struct mutex indirect_mutex; /* protect indexed register access */
        struct mutex alr_mutex; /* protect ALR access */
index e872b4e..425056c 100644 (file)
@@ -18,7 +18,9 @@
 #include <linux/capability.h>
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
+#include <linux/kallsyms.h>
 
+#include <net/xdp.h>
 #include <net/sch_generic.h>
 
 #include <uapi/linux/filter.h>
@@ -502,6 +504,7 @@ struct xdp_buff {
        void *data_end;
        void *data_meta;
        void *data_hard_start;
+       struct xdp_rxq_info *rxq;
 };
 
 /* Compute the linear packet data range [data, data_end) which
@@ -724,6 +727,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_pkt_data(void *func);
 
+static inline bool bpf_dump_raw_ok(void)
+{
+       /* Reconstruction of call-sites is dependent on kallsyms,
+        * thus make dump the same restriction.
+        */
+       return kallsyms_show_value() == 1;
+}
+
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
 
similarity index 94%
rename from include/linux/pti.h
rename to include/linux/intel-pti.h
index b3ea01a..2710d72 100644 (file)
@@ -22,8 +22,8 @@
  * interface to write out it's contents for debugging a mobile system.
  */
 
-#ifndef PTI_H_
-#define PTI_H_
+#ifndef LINUX_INTEL_PTI_H_
+#define LINUX_INTEL_PTI_H_
 
 /* offset for last dword of any PTI message. Part of MIPI P1149.7 */
 #define PTI_LASTDWORD_DTS      0x30
@@ -40,4 +40,4 @@ struct pti_masterchannel *pti_request_masterchannel(u8 type,
                                                    const char *thread_name);
 void pti_release_masterchannel(struct pti_masterchannel *mc);
 
-#endif /*PTI_H_*/
+#endif /* LINUX_INTEL_PTI_H_ */
index cb18c62..8415bf1 100644 (file)
@@ -273,7 +273,8 @@ struct ipv6_pinfo {
                                                 * 100: prefer care-of address
                                                 */
                                dontfrag:1,
-                               autoflowlabel:1;
+                               autoflowlabel:1,
+                               autoflowlabel_set:1;
        __u8                    min_hopcount;
        __u8                    tclass;
        __be32                  rcv_flowinfo;
index e37c21d..2cfffe5 100644 (file)
@@ -41,8 +41,8 @@ struct mdio_device {
        int addr;
        int flags;
        struct gpio_desc *reset;
-       unsigned int reset_delay;
-       unsigned int reset_post_delay;
+       unsigned int reset_assert_delay;
+       unsigned int reset_deassert_delay;
 };
 #define to_mdio_device(d) container_of(d, struct mdio_device, dev)
 
@@ -262,6 +262,9 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
        return reg;
 }
 
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+
 int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
 int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
 int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
index a2a1318..c3d3f04 100644 (file)
@@ -915,10 +915,10 @@ enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
 #define LTR_L1SS_PWR_GATE_CHECK_CARD_EN        BIT(6)
 
 enum dev_aspm_mode {
-       DEV_ASPM_DISABLE = 0,
        DEV_ASPM_DYNAMIC,
        DEV_ASPM_BACKDOOR,
        DEV_ASPM_STATIC,
+       DEV_ASPM_DISABLE,
 };
 
 /*
index a886b51..1f509d0 100644 (file)
@@ -556,6 +556,7 @@ struct mlx5_core_sriov {
 };
 
 struct mlx5_irq_info {
+       cpumask_var_t mask;
        char name[MLX5_MAX_IRQ_NAME];
 };
 
@@ -1048,7 +1049,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       enum mlx5_eq_type type);
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
-int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+void mlx5_stop_eqs(struct mlx5_core_dev *dev);
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
                    unsigned int *irqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1164,6 +1165,10 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+                                u64 *values,
+                                int num_counters,
+                                size_t *offsets);
 struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
 void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
 
index b25e7ba..a0b48af 100644 (file)
@@ -95,6 +95,10 @@ struct mlx5_flow_destination {
 struct mlx5_flow_namespace *
 mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
                        enum mlx5_flow_namespace_type type);
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+                                 enum mlx5_flow_namespace_type type,
+                                 int vport);
 
 struct mlx5_flow_table *
 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
index 38a7577..d44ec5f 100644 (file)
@@ -147,7 +147,7 @@ enum {
        MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
        MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
        MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
-       MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
+       MLX5_CMD_OP_SET_PP_RATE_LIMIT             = 0x780,
        MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
        MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT      = 0x782,
        MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT     = 0x783,
@@ -7239,7 +7239,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
        u8         vxlan_udp_port[0x10];
 };
 
-struct mlx5_ifc_set_rate_limit_out_bits {
+struct mlx5_ifc_set_pp_rate_limit_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
 
@@ -7248,7 +7248,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
        u8         reserved_at_40[0x40];
 };
 
-struct mlx5_ifc_set_rate_limit_in_bits {
+struct mlx5_ifc_set_pp_rate_limit_in_bits {
        u8         opcode[0x10];
        u8         reserved_at_10[0x10];
 
@@ -7261,6 +7261,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
        u8         reserved_at_60[0x20];
 
        u8         rate_limit[0x20];
+
+       u8         reserved_at_a0[0x160];
 };
 
 struct mlx5_ifc_access_register_out_bits {
index cc4ce74..440b000 100644 (file)
@@ -44,6 +44,7 @@
 #include <net/dcbnl.h>
 #endif
 #include <net/netprio_cgroup.h>
+#include <net/xdp.h>
 
 #include <linux/netdev_features.h>
 #include <linux/neighbour.h>
@@ -686,6 +687,7 @@ struct netdev_rx_queue {
 #endif
        struct kobject                  kobj;
        struct net_device               *dev;
+       struct xdp_rxq_info             xdp_rxq;
 } ____cacheline_aligned_in_smp;
 
 /*
@@ -804,7 +806,7 @@ enum bpf_netdev_command {
        BPF_OFFLOAD_DESTROY,
 };
 
-struct bpf_ext_analyzer_ops;
+struct bpf_prog_offload_ops;
 struct netlink_ext_ack;
 
 struct netdev_bpf {
@@ -826,7 +828,7 @@ struct netdev_bpf {
                /* BPF_OFFLOAD_VERIFIER_PREP */
                struct {
                        struct bpf_prog *prog;
-                       const struct bpf_ext_analyzer_ops *ops; /* callee set */
+                       const struct bpf_prog_offload_ops *ops; /* callee set */
                } verifier;
                /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
                struct {
@@ -1726,7 +1728,7 @@ struct net_device {
        const struct ndisc_ops *ndisc_ops;
 #endif
 
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
        const struct xfrmdev_ops *xfrmdev_ops;
 #endif
 
@@ -2793,7 +2795,9 @@ struct softnet_data {
        struct Qdisc            *output_queue;
        struct Qdisc            **output_queue_tailp;
        struct sk_buff          *completion_queue;
-
+#ifdef CONFIG_XFRM_OFFLOAD
+       struct sk_buff_head     xfrm_backlog;
+#endif
 #ifdef CONFIG_RPS
        /* input_queue_head should be written by cpu owning this struct,
         * and only read by other cpus. Worth using a cache line.
@@ -3325,7 +3329,7 @@ int dev_get_phys_port_id(struct net_device *dev,
 int dev_get_phys_port_name(struct net_device *dev,
                           char *name, size_t len);
 int dev_change_proto_down(struct net_device *dev, bool proto_down);
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                    struct netdev_queue *txq, int *ret);
 
index c4b4715..135aba5 100644 (file)
@@ -634,6 +634,9 @@ struct phy_driver {
        int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
                         u16 val);
 
+       int (*read_page)(struct phy_device *dev);
+       int (*write_page)(struct phy_device *dev, int page);
+
        /* Get the size and type of the eeprom contained within a plug-in
         * module */
        int (*module_info)(struct phy_device *dev,
@@ -690,6 +693,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
 size_t phy_speeds(unsigned int *speeds, size_t size,
                  unsigned long *mask, size_t maxbit);
 
+void phy_resolve_aneg_linkmode(struct phy_device *phydev);
+
 /**
  * phy_read_mmd - Convenience function for reading a register
  * from an MMD on a given PHY.
@@ -716,6 +721,18 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
 }
 
 /**
+ * __phy_read - convenience function for reading a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to read
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_read(struct phy_device *phydev, u32 regnum)
+{
+       return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
+}
+
+/**
  * phy_write - Convenience function for writing a given PHY register
  * @phydev: the phy_device struct
  * @regnum: register number to write
@@ -731,6 +748,23 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
 }
 
 /**
+ * __phy_write - Convenience function for writing a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+       return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum,
+                              val);
+}
+
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+
+/**
  * phy_interrupt_is_valid - Convenience function for testing a given PHY irq
  * @phydev: the phy_device struct
  *
@@ -808,6 +842,14 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
  */
 int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
 
+int phy_save_page(struct phy_device *phydev);
+int phy_select_page(struct phy_device *phydev, int page);
+int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
+int phy_read_paged(struct phy_device *phydev, int page, u32 regnum);
+int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val);
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+                    u16 mask, u16 set);
+
 struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
                                     bool is_c45,
                                     struct phy_c45_device_ids *c45_ids);
@@ -901,6 +943,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev);
 int genphy_c45_read_pma(struct phy_device *phydev);
 int genphy_c45_pma_setup_forced(struct phy_device *phydev);
 int genphy_c45_an_disable_aneg(struct phy_device *phydev);
+int genphy_c45_read_mdix(struct phy_device *phydev);
 
 static inline int phy_read_status(struct phy_device *phydev)
 {
index cf6392d..ee54453 100644 (file)
@@ -24,9 +24,6 @@ extern void fixed_phy_unregister(struct phy_device *phydev);
 extern int fixed_phy_set_link_update(struct phy_device *phydev,
                        int (*link_update)(struct net_device *,
                                           struct fixed_phy_status *));
-extern int fixed_phy_update_state(struct phy_device *phydev,
-                          const struct fixed_phy_status *status,
-                          const struct fixed_phy_status *changed);
 #else
 static inline int fixed_phy_add(unsigned int irq, int phy_id,
                                struct fixed_phy_status *status,
@@ -50,12 +47,6 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
 {
        return -ENODEV;
 }
-static inline int fixed_phy_update_state(struct phy_device *phydev,
-                          const struct fixed_phy_status *status,
-                          const struct fixed_phy_status *changed)
-{
-       return -ENODEV;
-}
 #endif /* CONFIG_FIXED_PHY */
 
 #endif /* __PHY_FIXED_H */
index 2ff18c9..d31cb62 100644 (file)
@@ -78,6 +78,9 @@ extern struct file *proc_ns_fget(int fd);
 #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
 extern void *ns_get_path(struct path *path, struct task_struct *task,
                        const struct proc_ns_operations *ns_ops);
+typedef struct ns_common *ns_get_path_helper_t(void *);
+extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
+                           void *private_data);
 
 extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
                        const struct proc_ns_operations *ns_ops);
index 6866df4..13fb06a 100644 (file)
@@ -447,7 +447,12 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
 
 static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
 {
-       return kcalloc(size, sizeof(void *), gfp);
+       /* Allocate an extra dummy element at end of ring to avoid consumer head
+        * or produce head access past the end of the array. Possible when
+        * producer/consumer operations and __ptr_ring_peek operations run in
+        * parallel.
+        */
+       return kcalloc(size + 1, sizeof(void *), gfp);
 }
 
 static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
index 39e2a2a..2b3b350 100644 (file)
 
 #ifndef _COMMON_HSI_H
 #define _COMMON_HSI_H
+
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
 
 /* dma_addr_t manip */
-#define PTR_LO(x)               ((u32)(((uintptr_t)(x)) & 0xffffffff))
-#define PTR_HI(x)               ((u32)((((uintptr_t)(x)) >> 16) >> 16))
+#define PTR_LO(x)              ((u32)(((uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x)              ((u32)((((uintptr_t)(x)) >> 16) >> 16))
 #define DMA_LO_LE(x)           cpu_to_le32(lower_32_bits(x))
 #define DMA_HI_LE(x)           cpu_to_le32(upper_32_bits(x))
 #define DMA_REGPAIR_LE(x, val) do { \
                                        (x).lo = DMA_LO_LE((val)); \
                                } while (0)
 
-#define HILO_GEN(hi, lo, type)  ((((type)(hi)) << 32) + (lo))
-#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_64_REGPAIR(regpair)        (HILO_64(regpair.hi, regpair.lo))
+#define HILO_GEN(hi, lo, type)         ((((type)(hi)) << 32) + (lo))
+#define HILO_64(hi, lo) \
+       HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64)
+#define HILO_64_REGPAIR(regpair) ({ \
+       typeof(regpair) __regpair = (regpair); \
+       HILO_64(__regpair.hi, __regpair.lo); })
 #define HILO_DMA_REGPAIR(regpair)      ((dma_addr_t)HILO_64_REGPAIR(regpair))
 
 #ifndef __COMMON_HSI__
 #define __COMMON_HSI__
 
+/********************************/
+/* PROTOCOL COMMON FW CONSTANTS */
+/********************************/
 
-#define X_FINAL_CLEANUP_AGG_INT 1
+#define X_FINAL_CLEANUP_AGG_INT                        1
 
-#define EVENT_RING_PAGE_SIZE_BYTES          4096
+#define EVENT_RING_PAGE_SIZE_BYTES             4096
 
-#define NUM_OF_GLOBAL_QUEUES                            128
-#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE        64
+#define NUM_OF_GLOBAL_QUEUES                   128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE       64
 
-#define ISCSI_CDU_TASK_SEG_TYPE       0
-#define FCOE_CDU_TASK_SEG_TYPE        0
-#define RDMA_CDU_TASK_SEG_TYPE        1
+#define ISCSI_CDU_TASK_SEG_TYPE                        0
+#define FCOE_CDU_TASK_SEG_TYPE                 0
+#define RDMA_CDU_TASK_SEG_TYPE                 1
 
-#define FW_ASSERT_GENERAL_ATTN_IDX    32
+#define FW_ASSERT_GENERAL_ATTN_IDX             32
 
-#define MAX_PINNED_CCFC                 32
+#define MAX_PINNED_CCFC                                32
 
 /* Queue Zone sizes in bytes */
-#define TSTORM_QZONE_SIZE 8
-#define MSTORM_QZONE_SIZE 16
-#define USTORM_QZONE_SIZE 8
-#define XSTORM_QZONE_SIZE 8
-#define YSTORM_QZONE_SIZE 0
-#define PSTORM_QZONE_SIZE 0
-
-#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG        7
+#define TSTORM_QZONE_SIZE      8
+#define MSTORM_QZONE_SIZE      16
+#define USTORM_QZONE_SIZE      8
+#define XSTORM_QZONE_SIZE      8
+#define YSTORM_QZONE_SIZE      0
+#define PSTORM_QZONE_SIZE      0
+
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG                7
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT   16
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE    48
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD      112
 #define MAX_NUM_LL2_TX_STATS_COUNTERS  48
 
 #define FW_MAJOR_VERSION       8
-#define FW_MINOR_VERSION       20
-#define FW_REVISION_VERSION    0
+#define FW_MINOR_VERSION       33
+#define FW_REVISION_VERSION    1
 #define FW_ENGINEERING_VERSION 0
 
 /***********************/
 #define MAX_NUM_PORTS_BB       (2)
 #define MAX_NUM_PORTS          (MAX_NUM_PORTS_K2)
 
-#define MAX_NUM_PFS_K2 (16)
-#define MAX_NUM_PFS_BB (8)
-#define MAX_NUM_PFS    (MAX_NUM_PFS_K2)
-#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+#define MAX_NUM_PFS_K2         (16)
+#define MAX_NUM_PFS_BB         (8)
+#define MAX_NUM_PFS            (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
 
 #define MAX_NUM_VFS_K2 (192)
 #define MAX_NUM_VFS_BB (120)
 /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
 #define NUM_PHYS_TCS_4PORT_K2  (4)
 #define NUM_OF_PHYS_TCS                (8)
-
+#define PURE_LB_TC             NUM_OF_PHYS_TCS
 #define NUM_TCS_4PORT_K2       (NUM_PHYS_TCS_4PORT_K2 + 1)
 #define NUM_OF_TCS             (NUM_OF_PHYS_TCS + 1)
 
-#define LB_TC                  (NUM_OF_PHYS_TCS)
-
-/* Num of possible traffic priority values */
-#define NUM_OF_PRIO            (8)
-
-#define MAX_NUM_VOQS_K2                (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
-#define MAX_NUM_VOQS_BB                (NUM_OF_TCS * MAX_NUM_PORTS_BB)
-#define MAX_NUM_VOQS           (MAX_NUM_VOQS_K2)
-#define MAX_PHYS_VOQS          (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
-
 /* CIDs */
-#define NUM_OF_CONNECTION_TYPES        (8)
-#define NUM_OF_LCIDS           (320)
-#define NUM_OF_LTIDS           (320)
-
-/* Clock values */
-#define MASTER_CLK_FREQ_E4     (375e6)
-#define STORM_CLK_FREQ_E4      (1000e6)
-#define CLK25M_CLK_FREQ_E4     (25e6)
+#define NUM_OF_CONNECTION_TYPES_E4     (8)
+#define NUM_OF_LCIDS                   (320)
+#define NUM_OF_LTIDS                   (320)
 
 /* Global PXP windows (GTT) */
 #define NUM_OF_GTT             19
 #define GTT_DWORD_SIZE         BIT(GTT_DWORD_SIZE_BITS)
 
 /* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION  10
 
 /*****************/
 /* CDU CONSTANTS */
 /*****************/
 
-#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT              (17)
-#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK             (0x1ffff)
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT                     (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK                    (0x1ffff)
 
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT       (12)
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK      (0xfff)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT               (12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK              (0xfff)
 
 #define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT                        (0)
 #define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT       (1)
 #define DQ_DEMS_TOE_LOCAL_ADV_WND      4
 #define DQ_DEMS_ROCE_CQ_CONS           7
 
-/* XCM agg val selection */
-#define DQ_XCM_AGG_VAL_SEL_WORD2  0
-#define DQ_XCM_AGG_VAL_SEL_WORD3  1
-#define DQ_XCM_AGG_VAL_SEL_WORD4  2
-#define DQ_XCM_AGG_VAL_SEL_WORD5  3
-#define DQ_XCM_AGG_VAL_SEL_REG3   4
-#define DQ_XCM_AGG_VAL_SEL_REG4   5
-#define DQ_XCM_AGG_VAL_SEL_REG5   6
-#define DQ_XCM_AGG_VAL_SEL_REG6   7
-
-/* XCM agg val selection */
-#define        DQ_XCM_CORE_TX_BD_CONS_CMD      DQ_XCM_AGG_VAL_SEL_WORD3
-#define        DQ_XCM_CORE_TX_BD_PROD_CMD      DQ_XCM_AGG_VAL_SEL_WORD4
-#define        DQ_XCM_CORE_SPQ_PROD_CMD        DQ_XCM_AGG_VAL_SEL_WORD4
-#define        DQ_XCM_ETH_EDPM_NUM_BDS_CMD     DQ_XCM_AGG_VAL_SEL_WORD2
-#define        DQ_XCM_ETH_TX_BD_CONS_CMD       DQ_XCM_AGG_VAL_SEL_WORD3
-#define        DQ_XCM_ETH_TX_BD_PROD_CMD       DQ_XCM_AGG_VAL_SEL_WORD4
-#define        DQ_XCM_ETH_GO_TO_BD_CONS_CMD    DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_FCOE_SQ_CONS_CMD             DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_FCOE_SQ_PROD_CMD             DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_FCOE_X_FERQ_PROD_CMD         DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_ISCSI_SQ_CONS_CMD       DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ISCSI_SQ_PROD_CMD       DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD   DQ_XCM_AGG_VAL_SEL_REG6
-#define DQ_XCM_ROCE_SQ_PROD_CMD        DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_TX_BD_PROD_CMD      DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD        DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+/* XCM agg val selection (HW) */
+#define DQ_XCM_AGG_VAL_SEL_WORD2       0
+#define DQ_XCM_AGG_VAL_SEL_WORD3       1
+#define DQ_XCM_AGG_VAL_SEL_WORD4       2
+#define DQ_XCM_AGG_VAL_SEL_WORD5       3
+#define DQ_XCM_AGG_VAL_SEL_REG3                4
+#define DQ_XCM_AGG_VAL_SEL_REG4                5
+#define DQ_XCM_AGG_VAL_SEL_REG5                6
+#define DQ_XCM_AGG_VAL_SEL_REG6                7
+
+/* XCM agg val selection (FW) */
+#define        DQ_XCM_CORE_TX_BD_CONS_CMD              DQ_XCM_AGG_VAL_SEL_WORD3
+#define        DQ_XCM_CORE_TX_BD_PROD_CMD              DQ_XCM_AGG_VAL_SEL_WORD4
+#define        DQ_XCM_CORE_SPQ_PROD_CMD                DQ_XCM_AGG_VAL_SEL_WORD4
+#define        DQ_XCM_ETH_EDPM_NUM_BDS_CMD             DQ_XCM_AGG_VAL_SEL_WORD2
+#define        DQ_XCM_ETH_TX_BD_CONS_CMD               DQ_XCM_AGG_VAL_SEL_WORD3
+#define        DQ_XCM_ETH_TX_BD_PROD_CMD               DQ_XCM_AGG_VAL_SEL_WORD4
+#define        DQ_XCM_ETH_GO_TO_BD_CONS_CMD            DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_FCOE_SQ_CONS_CMD                        DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD                        DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD            DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD               DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD               DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD      DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD           DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD                        DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD              DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD                DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD       DQ_XCM_AGG_VAL_SEL_REG4
 
 /* UCM agg val selection (HW) */
 #define        DQ_UCM_AGG_VAL_SEL_WORD0        0
 #define        DQ_UCM_AGG_VAL_SEL_WORD1        1
 #define        DQ_UCM_AGG_VAL_SEL_WORD2        2
 #define        DQ_UCM_AGG_VAL_SEL_WORD3        3
-#define        DQ_UCM_AGG_VAL_SEL_REG0 4
-#define        DQ_UCM_AGG_VAL_SEL_REG1 5
-#define        DQ_UCM_AGG_VAL_SEL_REG2 6
-#define        DQ_UCM_AGG_VAL_SEL_REG3 7
+#define        DQ_UCM_AGG_VAL_SEL_REG0         4
+#define        DQ_UCM_AGG_VAL_SEL_REG1         5
+#define        DQ_UCM_AGG_VAL_SEL_REG2         6
+#define        DQ_UCM_AGG_VAL_SEL_REG3         7
 
 /* UCM agg val selection (FW) */
 #define DQ_UCM_ETH_PMD_TX_CONS_CMD     DQ_UCM_AGG_VAL_SEL_WORD2
 #define DQ_TCM_ROCE_RQ_PROD_CMD        \
        DQ_TCM_AGG_VAL_SEL_WORD0
 
-/* XCM agg counter flag selection */
+/* XCM agg counter flag selection (HW) */
 #define        DQ_XCM_AGG_FLG_SHIFT_BIT14      0
 #define        DQ_XCM_AGG_FLG_SHIFT_BIT15      1
 #define        DQ_XCM_AGG_FLG_SHIFT_CF12       2
 #define        DQ_XCM_AGG_FLG_SHIFT_CF22       6
 #define        DQ_XCM_AGG_FLG_SHIFT_CF23       7
 
-/* XCM agg counter flag selection */
-#define DQ_XCM_CORE_DQ_CF_CMD          BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_TERMINATE_CMD      BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_SLOW_PATH_CMD      BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_DQ_CF_CMD           BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD       BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD       BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD          BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_FCOE_SLOW_PATH_CMD           BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_DQ_FLUSH_CMD      BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ISCSI_SLOW_PATH_CMD     BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_TOE_DQ_FLUSH_CMD                BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_TOE_SLOW_PATH_CMD       BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+/* XCM agg counter flag selection (FW) */
+#define DQ_XCM_CORE_DQ_CF_CMD                  BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD              BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD              BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD                   BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD               BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD               BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD                  BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD              BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD              BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD             BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD     BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD                        BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD               BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
 
 /* UCM agg counter flag selection (HW) */
 #define        DQ_UCM_AGG_FLG_SHIFT_CF0        0
 #define DQ_TCM_AGG_FLG_SHIFT_CF6       6
 #define DQ_TCM_AGG_FLG_SHIFT_CF7       7
 /* TCM agg counter flag selection (FW) */
-#define DQ_TCM_FCOE_FLUSH_Q0_CMD            BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
-#define DQ_TCM_FCOE_DUMMY_TIMER_CMD         BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
-#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD      BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD       BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD    BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
 #define DQ_TCM_ISCSI_FLUSH_Q0_CMD      BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
 #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD        BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
 #define DQ_TCM_TOE_FLUSH_Q0_CMD                BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
 #define DQ_TCM_IWARP_POST_RQ_CF_CMD    BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
 
 /* PWM address mapping */
-#define DQ_PWM_OFFSET_DPM_BASE 0x0
-#define DQ_PWM_OFFSET_DPM_END  0x27
+#define DQ_PWM_OFFSET_DPM_BASE         0x0
+#define DQ_PWM_OFFSET_DPM_END          0x27
 #define DQ_PWM_OFFSET_XCM16_BASE       0x40
 #define DQ_PWM_OFFSET_XCM32_BASE       0x44
 #define DQ_PWM_OFFSET_UCM16_BASE       0x48
 #define DQ_PWM_OFFSET_UCM32_BASE       0x4C
-#define DQ_PWM_OFFSET_UCM16_4  0x50
+#define DQ_PWM_OFFSET_UCM16_4          0x50
 #define DQ_PWM_OFFSET_TCM16_BASE       0x58
 #define DQ_PWM_OFFSET_TCM32_BASE       0x5C
-#define DQ_PWM_OFFSET_XCM_FLAGS        0x68
-#define DQ_PWM_OFFSET_UCM_FLAGS        0x69
-#define DQ_PWM_OFFSET_TCM_FLAGS        0x6B
+#define DQ_PWM_OFFSET_XCM_FLAGS                0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS                0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS                0x6B
 
 #define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD         (DQ_PWM_OFFSET_XCM16_BASE + 2)
 #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT   (DQ_PWM_OFFSET_UCM32_BASE)
 #define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS       (DQ_PWM_OFFSET_UCM_FLAGS)
 #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD         (DQ_PWM_OFFSET_TCM16_BASE + 1)
 #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD                (DQ_PWM_OFFSET_TCM16_BASE + 3)
-#define        DQ_REGION_SHIFT (12)
+
+#define        DQ_REGION_SHIFT                 (12)
 
 /* DPM */
-#define        DQ_DPM_WQE_BUFF_SIZE    (320)
+#define        DQ_DPM_WQE_BUFF_SIZE            (320)
 
 /* Conn type ranges */
 #define        DQ_CONN_TYPE_RANGE_SHIFT        (4)
 /* QM CONSTANTS  */
 /*****************/
 
-/* number of TX queues in the QM */
+/* Number of TX queues in the QM */
 #define MAX_QM_TX_QUEUES_K2    512
 #define MAX_QM_TX_QUEUES_BB    448
 #define MAX_QM_TX_QUEUES       MAX_QM_TX_QUEUES_K2
 
-/* number of Other queues in the QM */
+/* Number of Other queues in the QM */
 #define MAX_QM_OTHER_QUEUES_BB 64
 #define MAX_QM_OTHER_QUEUES_K2 128
 #define MAX_QM_OTHER_QUEUES    MAX_QM_OTHER_QUEUES_K2
 
-/* number of queues in a PF queue group */
+/* Number of queues in a PF queue group */
 #define QM_PF_QUEUE_GROUP_SIZE 8
 
-/* the size of a single queue element in bytes */
-#define QM_PQ_ELEMENT_SIZE                      4
+/* The size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE     4
 
-/* base number of Tx PQs in the CM PQ representation.
- * should be used when storing PQ IDs in CM PQ registers and context
+/* Base number of Tx PQs in the CM PQ representation.
+ * Should be used when storing PQ IDs in CM PQ registers and context.
  */
-#define CM_TX_PQ_BASE  0x200
+#define CM_TX_PQ_BASE          0x200
 
-/* number of global Vport/QCN rate limiters */
+/* Number of global Vport/QCN rate limiters */
 #define MAX_QM_GLOBAL_RLS      256
+
 /* QM registers data */
 #define QM_LINE_CRD_REG_WIDTH          16
 #define QM_LINE_CRD_REG_SIGN_BIT       BIT((QM_LINE_CRD_REG_WIDTH - 1))
 #define CAU_FSM_ETH_TX  1
 
 /* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB    12
+#define PIS_PER_SB_E4  12
 
 #define CAU_HC_STOPPED_STATE   3
 #define CAU_HC_DISABLE_STATE   4
 
 #define IGU_CMD_INT_ACK_BASE           0x0400
 #define IGU_CMD_INT_ACK_UPPER          (IGU_CMD_INT_ACK_BASE + \
-                                        MAX_TOT_SB_PER_PATH -  \
-                                        1)
+                                        MAX_TOT_SB_PER_PATH - 1)
 #define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
 
 #define IGU_CMD_ATTN_BIT_UPD_UPPER     0x05f0
 
 #define IGU_CMD_PROD_UPD_BASE                  0x0600
 #define IGU_CMD_PROD_UPD_UPPER                 (IGU_CMD_PROD_UPD_BASE +\
-                                                MAX_TOT_SB_PER_PATH - \
-                                                1)
+                                                MAX_TOT_SB_PER_PATH - 1)
 #define IGU_CMD_PROD_UPD_RESERVED_UPPER                0x07ff
 
 /*****************/
         PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
 
 /* PF BAR */
-#define PXP_BAR0_START_GRC     0x0000
-#define PXP_BAR0_GRC_LENGTH    0x1C00000
-#define PXP_BAR0_END_GRC       (PXP_BAR0_START_GRC + \
-                                PXP_BAR0_GRC_LENGTH - 1)
-
-#define PXP_BAR0_START_IGU     0x1C00000
-#define PXP_BAR0_IGU_LENGTH    0x10000
-#define PXP_BAR0_END_IGU       (PXP_BAR0_START_IGU + \
-                                PXP_BAR0_IGU_LENGTH - 1)
-
-#define PXP_BAR0_START_TSDM    0x1C80000
-#define PXP_BAR0_SDM_LENGTH    0x40000
+#define PXP_BAR0_START_GRC             0x0000
+#define PXP_BAR0_GRC_LENGTH            0x1C00000
+#define PXP_BAR0_END_GRC               (PXP_BAR0_START_GRC + \
+                                        PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU             0x1C00000
+#define PXP_BAR0_IGU_LENGTH            0x10000
+#define PXP_BAR0_END_IGU               (PXP_BAR0_START_IGU + \
+                                        PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM            0x1C80000
+#define PXP_BAR0_SDM_LENGTH            0x40000
 #define PXP_BAR0_SDM_RESERVED_LENGTH   0x40000
-#define PXP_BAR0_END_TSDM      (PXP_BAR0_START_TSDM + \
-                                PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_TSDM              (PXP_BAR0_START_TSDM + \
+                                        PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_MSDM    0x1D00000
-#define PXP_BAR0_END_MSDM      (PXP_BAR0_START_MSDM + \
-                                PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_MSDM            0x1D00000
+#define PXP_BAR0_END_MSDM              (PXP_BAR0_START_MSDM + \
+                                        PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_USDM    0x1D80000
-#define PXP_BAR0_END_USDM      (PXP_BAR0_START_USDM + \
-                                PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_USDM            0x1D80000
+#define PXP_BAR0_END_USDM              (PXP_BAR0_START_USDM + \
+                                        PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_XSDM    0x1E00000
-#define PXP_BAR0_END_XSDM      (PXP_BAR0_START_XSDM + \
-                                PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_XSDM            0x1E00000
+#define PXP_BAR0_END_XSDM              (PXP_BAR0_START_XSDM + \
+                                        PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_YSDM    0x1E80000
-#define PXP_BAR0_END_YSDM      (PXP_BAR0_START_YSDM + \
-                                PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_YSDM            0x1E80000
+#define PXP_BAR0_END_YSDM              (PXP_BAR0_START_YSDM + \
+                                        PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_PSDM    0x1F00000
-#define PXP_BAR0_END_PSDM      (PXP_BAR0_START_PSDM + \
-                                PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_PSDM            0x1F00000
+#define PXP_BAR0_END_PSDM              (PXP_BAR0_START_PSDM + \
+                                        PXP_BAR0_SDM_LENGTH - 1)
 
 #define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1)
 
 /* VF BAR */
-#define PXP_VF_BAR0    0
-
-#define PXP_VF_BAR0_START_GRC  0x3E00
-#define PXP_VF_BAR0_GRC_LENGTH 0x200
-#define PXP_VF_BAR0_END_GRC    (PXP_VF_BAR0_START_GRC + \
-                                PXP_VF_BAR0_GRC_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_IGU                   0
-#define PXP_VF_BAR0_IGU_LENGTH                  0x3000
-#define PXP_VF_BAR0_END_IGU                     (PXP_VF_BAR0_START_IGU + \
-                                                PXP_VF_BAR0_IGU_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_DQ                    0x3000
-#define PXP_VF_BAR0_DQ_LENGTH                   0x200
-#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET            0
-#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS           (PXP_VF_BAR0_START_DQ +        \
-                                                PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
-#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS         (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
-                                                + 4)
-#define PXP_VF_BAR0_END_DQ                      (PXP_VF_BAR0_START_DQ +        \
-                                                PXP_VF_BAR0_DQ_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_TSDM_ZONE_B           0x3200
-#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B           0x200
-#define PXP_VF_BAR0_END_TSDM_ZONE_B             (PXP_VF_BAR0_START_TSDM_ZONE_B \
-                                                +                             \
-                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-                                                - 1)
-
-#define PXP_VF_BAR0_START_MSDM_ZONE_B           0x3400
-#define PXP_VF_BAR0_END_MSDM_ZONE_B             (PXP_VF_BAR0_START_MSDM_ZONE_B \
-                                                +                             \
-                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-                                                - 1)
-
-#define PXP_VF_BAR0_START_USDM_ZONE_B           0x3600
-#define PXP_VF_BAR0_END_USDM_ZONE_B             (PXP_VF_BAR0_START_USDM_ZONE_B \
-                                                +                             \
-                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-                                                - 1)
-
-#define PXP_VF_BAR0_START_XSDM_ZONE_B           0x3800
-#define PXP_VF_BAR0_END_XSDM_ZONE_B             (PXP_VF_BAR0_START_XSDM_ZONE_B \
-                                                +                             \
-                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-                                                - 1)
-
-#define PXP_VF_BAR0_START_YSDM_ZONE_B           0x3a00
-#define PXP_VF_BAR0_END_YSDM_ZONE_B             (PXP_VF_BAR0_START_YSDM_ZONE_B \
-                                                +                             \
-                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-                                                - 1)
-
-#define PXP_VF_BAR0_START_PSDM_ZONE_B           0x3c00
-#define PXP_VF_BAR0_END_PSDM_ZONE_B             (PXP_VF_BAR0_START_PSDM_ZONE_B \
-                                                +                             \
-                                                PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-                                                - 1)
-
-#define PXP_VF_BAR0_START_SDM_ZONE_A            0x4000
-#define PXP_VF_BAR0_END_SDM_ZONE_A              0x10000
-
-#define PXP_VF_BAR0_GRC_WINDOW_LENGTH           32
-
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN         12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER                1024
+#define PXP_VF_BAR0                    0
+
+#define PXP_VF_BAR0_START_IGU          0
+#define PXP_VF_BAR0_IGU_LENGTH         0x3000
+#define PXP_VF_BAR0_END_IGU            (PXP_VF_BAR0_START_IGU + \
+                                        PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ           0x3000
+#define PXP_VF_BAR0_DQ_LENGTH          0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET   0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS  (PXP_VF_BAR0_START_DQ + \
+                                        PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS        (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+                                        + 4)
+#define PXP_VF_BAR0_END_DQ             (PXP_VF_BAR0_START_DQ + \
+                                        PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B  0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B  0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B    (PXP_VF_BAR0_START_TSDM_ZONE_B + \
+                                        PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B  0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B    (PXP_VF_BAR0_START_MSDM_ZONE_B + \
+                                        PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B  0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B    (PXP_VF_BAR0_START_USDM_ZONE_B + \
+                                        PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B  0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B    (PXP_VF_BAR0_START_XSDM_ZONE_B + \
+                                        PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B  0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B    (PXP_VF_BAR0_START_YSDM_ZONE_B + \
+                                        PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B  0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B    (PXP_VF_BAR0_START_PSDM_ZONE_B + \
+                                        PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_GRC          0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH         0x200
+#define PXP_VF_BAR0_END_GRC            (PXP_VF_BAR0_START_GRC + \
+                                        PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A   0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A     0x10000
+
+#define PXP_VF_BAR0_START_IGU2         0x10000
+#define PXP_VF_BAR0_IGU2_LENGTH                0xD000
+#define PXP_VF_BAR0_END_IGU2           (PXP_VF_BAR0_START_IGU2 + \
+                                        PXP_VF_BAR0_IGU2_LENGTH - 1)
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH  32
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER        1024
 
 /* ILT Records */
 #define PXP_NUM_ILT_RECORDS_BB 7600
 #define PXP_NUM_ILT_RECORDS_K2 11000
 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
-#define PXP_QUEUES_ZONE_MAX_NUM 320
+
+/* Host Interface */
+#define PXP_QUEUES_ZONE_MAX_NUM        320
+
 /*****************/
 /* PRM CONSTANTS */
 /*****************/
-#define PRM_DMA_PAD_BYTES_NUM  2
+#define PRM_DMA_PAD_BYTES_NUM  2
+
 /*****************/
 /* SDMs CONSTANTS  */
 /*****************/
 
-#define SDM_OP_GEN_TRIG_NONE                    0
-#define SDM_OP_GEN_TRIG_WAKE_THREAD             1
-#define SDM_OP_GEN_TRIG_AGG_INT                 2
-#define SDM_OP_GEN_TRIG_LOADER                  4
+#define SDM_OP_GEN_TRIG_NONE           0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD    1
+#define SDM_OP_GEN_TRIG_AGG_INT                2
+#define SDM_OP_GEN_TRIG_LOADER         4
 #define SDM_OP_GEN_TRIG_INDICATE_ERROR  6
 #define SDM_OP_GEN_TRIG_INC_ORDER_CNT   9
 
 /* Completion types */
 /********************/
 
-#define SDM_COMP_TYPE_NONE              0
-#define SDM_COMP_TYPE_WAKE_THREAD       1
-#define SDM_COMP_TYPE_AGG_INT           2
-#define SDM_COMP_TYPE_CM                3
-#define SDM_COMP_TYPE_LOADER            4
-#define SDM_COMP_TYPE_PXP               5
-#define SDM_COMP_TYPE_INDICATE_ERROR    6
-#define SDM_COMP_TYPE_RELEASE_THREAD    7
-#define SDM_COMP_TYPE_RAM               8
-#define SDM_COMP_TYPE_INC_ORDER_CNT     9
+#define SDM_COMP_TYPE_NONE             0
+#define SDM_COMP_TYPE_WAKE_THREAD      1
+#define SDM_COMP_TYPE_AGG_INT          2
+#define SDM_COMP_TYPE_CM               3
+#define SDM_COMP_TYPE_LOADER           4
+#define SDM_COMP_TYPE_PXP              5
+#define SDM_COMP_TYPE_INDICATE_ERROR   6
+#define SDM_COMP_TYPE_RELEASE_THREAD   7
+#define SDM_COMP_TYPE_RAM              8
+#define SDM_COMP_TYPE_INC_ORDER_CNT    9
 
 /*****************/
-/* PBF Constants */
+/* PBF CONSTANTS */
 /*****************/
 
 /* Number of PBF command queue lines. Each line is 32B. */
-#define PBF_MAX_CMD_LINES 3328
+#define PBF_MAX_CMD_LINES      3328
 
 /* Number of BTB blocks. Each block is 256B. */
-#define BTB_MAX_BLOCKS 1440
+#define BTB_MAX_BLOCKS         1440
 
 /*****************/
 /* PRS CONSTANTS */
 
 #define PRS_GFT_CAM_LINES_NO_MATCH     31
 
-/* Async data KCQ CQE */
-struct async_data {
-       __le32  cid;
-       __le16  itid;
-       u8      error_code;
-       u8      fw_debug_param;
-};
-
+/* Interrupt coalescing TimeSet */
 struct coalescing_timeset {
        u8 value;
 #define        COALESCING_TIMESET_TIMESET_MASK         0x7F
@@ -692,23 +674,32 @@ struct common_queue_zone {
        __le16 reserved;
 };
 
+/* ETH Rx producers data */
 struct eth_rx_prod_data {
        __le16 bd_prod;
        __le16 cqe_prod;
 };
 
-struct regpair {
-       __le32  lo;
-       __le32  hi;
+struct tcp_ulp_connect_done_params {
+       __le16 mss;
+       u8 snd_wnd_scale;
+       u8 flags;
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK         0x1
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT                0
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK      0x7F
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT     1
 };
 
-struct vf_pf_channel_eqe_data {
-       struct regpair msg_addr;
+struct iscsi_connect_done_results {
+       __le16 icid;
+       __le16 conn_id;
+       struct tcp_ulp_connect_done_params params;
 };
 
 struct iscsi_eqe_data {
-       __le32 cid;
+       __le16 icid;
        __le16 conn_id;
+       __le16 reserved;
        u8 error_code;
        u8 error_pdu_opcode_reserved;
 #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK           0x3F
@@ -719,52 +710,6 @@ struct iscsi_eqe_data {
 #define ISCSI_EQE_DATA_RESERVED0_SHIFT                 7
 };
 
-struct rdma_eqe_destroy_qp {
-       __le32 cid;
-       u8 reserved[4];
-};
-
-union rdma_eqe_data {
-       struct regpair async_handle;
-       struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
-};
-
-struct malicious_vf_eqe_data {
-       u8 vf_id;
-       u8 err_id;
-       __le16 reserved[3];
-};
-
-struct initial_cleanup_eqe_data {
-       u8 vf_id;
-       u8 reserved[7];
-};
-
-/* Event Data Union */
-union event_ring_data {
-       u8 bytes[8];
-       struct vf_pf_channel_eqe_data vf_pf_channel;
-       struct iscsi_eqe_data iscsi_info;
-       union rdma_eqe_data rdma_data;
-       struct malicious_vf_eqe_data malicious_vf;
-       struct initial_cleanup_eqe_data vf_init_cleanup;
-};
-
-/* Event Ring Entry */
-struct event_ring_entry {
-       u8                      protocol_id;
-       u8                      opcode;
-       __le16                  reserved0;
-       __le16                  echo;
-       u8                      fw_return_code;
-       u8                      flags;
-#define EVENT_RING_ENTRY_ASYNC_MASK      0x1
-#define EVENT_RING_ENTRY_ASYNC_SHIFT     0
-#define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
-#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
-       union event_ring_data   data;
-};
-
 /* Multi function mode */
 enum mf_mode {
        ERROR_MODE /* Unsupported mode */,
@@ -781,13 +726,31 @@ enum protocol_type {
        PROTOCOLID_CORE,
        PROTOCOLID_ETH,
        PROTOCOLID_IWARP,
-       PROTOCOLID_RESERVED5,
+       PROTOCOLID_RESERVED0,
        PROTOCOLID_PREROCE,
        PROTOCOLID_COMMON,
-       PROTOCOLID_RESERVED6,
+       PROTOCOLID_RESERVED1,
        MAX_PROTOCOL_TYPE
 };
 
+struct regpair {
+       __le32 lo;
+       __le32 hi;
+};
+
+/* RoCE Destroy Event Data */
+struct rdma_eqe_destroy_qp {
+       __le32 cid;
+       u8 reserved[4];
+};
+
+/* RDMA Event Data Union */
+union rdma_eqe_data {
+       struct regpair async_handle;
+       struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+};
+
+/* Ustorm Queue Zone */
 struct ustorm_eth_queue_zone {
        struct coalescing_timeset int_coalescing_timeset;
        u8 reserved[3];
@@ -798,62 +761,71 @@ struct ustorm_queue_zone {
        struct common_queue_zone common;
 };
 
-/* status block structure */
+/* Status block structure */
 struct cau_pi_entry {
-       u32 prod;
-#define CAU_PI_ENTRY_PROD_VAL_MASK    0xFFFF
-#define CAU_PI_ENTRY_PROD_VAL_SHIFT   0
-#define CAU_PI_ENTRY_PI_TIMESET_MASK  0x7F
-#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
-#define CAU_PI_ENTRY_FSM_SEL_MASK     0x1
-#define CAU_PI_ENTRY_FSM_SEL_SHIFT    23
-#define CAU_PI_ENTRY_RESERVED_MASK    0xFF
-#define CAU_PI_ENTRY_RESERVED_SHIFT   24
+       __le32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK     0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT    0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK   0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT  16
+#define CAU_PI_ENTRY_FSM_SEL_MASK      0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT     23
+#define CAU_PI_ENTRY_RESERVED_MASK     0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT    24
 };
 
-/* status block structure */
+/* Status block structure */
 struct cau_sb_entry {
-       u32 data;
-#define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF
-#define CAU_SB_ENTRY_SB_PROD_SHIFT     0
-#define CAU_SB_ENTRY_STATE0_MASK       0xF
-#define CAU_SB_ENTRY_STATE0_SHIFT      24
-#define CAU_SB_ENTRY_STATE1_MASK       0xF
-#define CAU_SB_ENTRY_STATE1_SHIFT      28
-       u32 params;
-#define CAU_SB_ENTRY_SB_TIMESET0_MASK  0x7F
-#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
-#define CAU_SB_ENTRY_SB_TIMESET1_MASK  0x7F
-#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
-#define CAU_SB_ENTRY_TIMER_RES0_MASK   0x3
-#define CAU_SB_ENTRY_TIMER_RES0_SHIFT  14
-#define CAU_SB_ENTRY_TIMER_RES1_MASK   0x3
-#define CAU_SB_ENTRY_TIMER_RES1_SHIFT  16
-#define CAU_SB_ENTRY_VF_NUMBER_MASK    0xFF
-#define CAU_SB_ENTRY_VF_NUMBER_SHIFT   18
-#define CAU_SB_ENTRY_VF_VALID_MASK     0x1
-#define CAU_SB_ENTRY_VF_VALID_SHIFT    26
-#define CAU_SB_ENTRY_PF_NUMBER_MASK    0xF
-#define CAU_SB_ENTRY_PF_NUMBER_SHIFT   27
-#define CAU_SB_ENTRY_TPH_MASK          0x1
-#define CAU_SB_ENTRY_TPH_SHIFT         31
+       __le32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT     0
+#define CAU_SB_ENTRY_STATE0_MASK       0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT      24
+#define CAU_SB_ENTRY_STATE1_MASK       0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT      28
+       __le32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT  14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT  16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK    0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT   18
+#define CAU_SB_ENTRY_VF_VALID_MASK     0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT    26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK    0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT   27
+#define CAU_SB_ENTRY_TPH_MASK          0x1
+#define CAU_SB_ENTRY_TPH_SHIFT         31
 };
 
-/* core doorbell data */
+/* Igu cleanup bit values to distinguish between clean or producer consumer
+ * update.
+ */
+enum command_type_bit {
+       IGU_COMMAND_TYPE_NOP = 0,
+       IGU_COMMAND_TYPE_SET = 1,
+       MAX_COMMAND_TYPE_BIT
+};
+
+/* Core doorbell data */
 struct core_db_data {
        u8 params;
-#define CORE_DB_DATA_DEST_MASK         0x3
-#define CORE_DB_DATA_DEST_SHIFT        0
-#define CORE_DB_DATA_AGG_CMD_MASK      0x3
-#define CORE_DB_DATA_AGG_CMD_SHIFT     2
-#define CORE_DB_DATA_BYPASS_EN_MASK    0x1
-#define CORE_DB_DATA_BYPASS_EN_SHIFT   4
-#define CORE_DB_DATA_RESERVED_MASK     0x1
-#define CORE_DB_DATA_RESERVED_SHIFT    5
-#define CORE_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
-       u8      agg_flags;
-       __le16  spq_prod;
+#define CORE_DB_DATA_DEST_MASK         0x3
+#define CORE_DB_DATA_DEST_SHIFT                0
+#define CORE_DB_DATA_AGG_CMD_MASK      0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT     2
+#define CORE_DB_DATA_BYPASS_EN_MASK    0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT   4
+#define CORE_DB_DATA_RESERVED_MASK     0x1
+#define CORE_DB_DATA_RESERVED_SHIFT    5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+       u8 agg_flags;
+       __le16 spq_prod;
 };
 
 /* Enum of doorbell aggregative command selection */
@@ -909,67 +881,69 @@ struct db_l2_dpm_sge {
        struct regpair addr;
        __le16 nbytes;
        __le16 bitfields;
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK        0x1FF
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
-#define DB_L2_DPM_SGE_RESERVED0_MASK   0x3
-#define DB_L2_DPM_SGE_RESERVED0_SHIFT  9
-#define DB_L2_DPM_SGE_ST_VALID_MASK    0x1
-#define DB_L2_DPM_SGE_ST_VALID_SHIFT   11
-#define DB_L2_DPM_SGE_RESERVED1_MASK   0xF
-#define DB_L2_DPM_SGE_RESERVED1_SHIFT  12
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK                0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT       0
+#define DB_L2_DPM_SGE_RESERVED0_MASK           0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT          9
+#define DB_L2_DPM_SGE_ST_VALID_MASK            0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT           11
+#define DB_L2_DPM_SGE_RESERVED1_MASK           0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT          12
        __le32 reserved2;
 };
 
 /* Structure for doorbell address, in legacy mode */
 struct db_legacy_addr {
        __le32 addr;
-#define DB_LEGACY_ADDR_RESERVED0_MASK  0x3
-#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
-#define DB_LEGACY_ADDR_DEMS_MASK       0x7
-#define DB_LEGACY_ADDR_DEMS_SHIFT      2
-#define DB_LEGACY_ADDR_ICID_MASK       0x7FFFFFF
-#define DB_LEGACY_ADDR_ICID_SHIFT      5
+#define DB_LEGACY_ADDR_RESERVED0_MASK  0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK       0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT      2
+#define DB_LEGACY_ADDR_ICID_MASK       0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT      5
 };
 
 /* Structure for doorbell address, in PWM mode */
 struct db_pwm_addr {
        __le32 addr;
 #define DB_PWM_ADDR_RESERVED0_MASK     0x7
-#define DB_PWM_ADDR_RESERVED0_SHIFT 0
-#define DB_PWM_ADDR_OFFSET_MASK        0x7F
+#define DB_PWM_ADDR_RESERVED0_SHIFT    0
+#define DB_PWM_ADDR_OFFSET_MASK                0x7F
 #define DB_PWM_ADDR_OFFSET_SHIFT       3
-#define DB_PWM_ADDR_WID_MASK   0x3
-#define DB_PWM_ADDR_WID_SHIFT  10
-#define DB_PWM_ADDR_DPI_MASK   0xFFFF
-#define DB_PWM_ADDR_DPI_SHIFT  12
+#define DB_PWM_ADDR_WID_MASK           0x3
+#define DB_PWM_ADDR_WID_SHIFT          10
+#define DB_PWM_ADDR_DPI_MASK           0xFFFF
+#define DB_PWM_ADDR_DPI_SHIFT          12
 #define DB_PWM_ADDR_RESERVED1_MASK     0xF
-#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+#define DB_PWM_ADDR_RESERVED1_SHIFT    28
 };
 
-/* Parameters to RoCE firmware, passed in EDPM doorbell */
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
 struct db_rdma_dpm_params {
        __le32 params;
-#define DB_RDMA_DPM_PARAMS_SIZE_MASK           0x3F
-#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT          0
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK       0x3
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT      6
-#define DB_RDMA_DPM_PARAMS_OPCODE_MASK         0xFF
-#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT                8
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK       0x7FF
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT      16
-#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK      0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT     27
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_RDMA_DPM_PARAMS_S_FLG_MASK          0x1
-#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT         29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK      0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT     30
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK                   0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT                  0
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK               0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT              6
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK                 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT                        8
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK               0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT              16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK              0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT             27
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK         0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT                28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK                  0x1
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT                 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK              0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT             30
 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK     0x1
 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT    31
 };
 
-/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
+/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
+ * DPM burst.
+ */
 struct db_rdma_dpm_data {
        __le16 icid;
        __le16 prod_val;
@@ -987,22 +961,22 @@ enum igu_int_cmd {
 
 /* IGU producer or consumer update command */
 struct igu_prod_cons_update {
-       u32 sb_id_and_flags;
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK        0xFFFFFF
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT       0
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK     0x1
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT    24
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK      0x3
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT     25
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK  0x1
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK      0x1
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT     28
-#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK       0x3
-#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT      29
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK    0x1
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT   31
-       u32 reserved1;
+       __le32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK             0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT            0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK          0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT         24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK           0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT          25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK       0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT      27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK           0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT          28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK            0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT           29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK         0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT                31
+       __le32 reserved1;
 };
 
 /* Igu segments access for default status block only */
@@ -1012,38 +986,63 @@ enum igu_seg_access {
        MAX_IGU_SEG_ACCESS
 };
 
+/* Enumeration for L3 type field of parsing_and_err_flags.
+ * L3Type: 0 - unknown (not ip), 1 - Ipv4, 2 - Ipv6
+ * (This field can be filled according to the last-ethertype)
+ */
+enum l3_type {
+       e_l3_type_unknown,
+       e_l3_type_ipv4,
+       e_l3_type_ipv6,
+       MAX_L3_TYPE
+};
+
+/* Enumeration for l4Protocol field of parsing_and_err_flags.
+ * L4-protocol: 0 - none, 1 - TCP, 2 - UDP.
+ * If the packet is IPv4 fragment, and its not the first fragment, the
+ * protocol-type should be set to none.
+ */
+enum l4_protocol {
+       e_l4_protocol_none,
+       e_l4_protocol_tcp,
+       e_l4_protocol_udp,
+       MAX_L4_PROTOCOL
+};
+
+/* Parsing and error flags field */
 struct parsing_and_err_flags {
        __le16 flags;
-#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK                      0x3
-#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT                     0
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK                  0x3
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT                 2
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK                    0x1
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT                   4
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK               0x1
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT              5
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK        0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT       6
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK                 0x1
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT                7
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK           0x1
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT          8
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK                  0x1
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT                 9
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK                0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT               10
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK                 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT                11
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK         0x1
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT        12
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK            0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT           13
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK  0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK          0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT         15
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK                      0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT                     0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK                  0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT                 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK                    0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT                   4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK               0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT              5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK                0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT       6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT                        7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK           0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT          8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK                  0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT                 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK                        0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT               10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT                        11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK         0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT                12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK            0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT           13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK  0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK          0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT         15
 };
 
+/* Parsing error flags bitmap */
 struct parsing_err_flags {
        __le16 flags;
 #define PARSING_ERR_FLAGS_MAC_ERROR_MASK                               0x1
@@ -1080,266 +1079,260 @@ struct parsing_err_flags {
 #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT                  15
 };
 
+/* Pb context */
 struct pb_context {
        __le32 crc[4];
 };
 
+/* Concrete Function ID */
 struct pxp_concrete_fid {
        __le16 fid;
-#define PXP_CONCRETE_FID_PFID_MASK     0xF
-#define PXP_CONCRETE_FID_PFID_SHIFT    0
-#define PXP_CONCRETE_FID_PORT_MASK     0x3
-#define PXP_CONCRETE_FID_PORT_SHIFT    4
-#define PXP_CONCRETE_FID_PATH_MASK     0x1
-#define PXP_CONCRETE_FID_PATH_SHIFT    6
-#define PXP_CONCRETE_FID_VFVALID_MASK  0x1
-#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
-#define PXP_CONCRETE_FID_VFID_MASK     0xFF
-#define PXP_CONCRETE_FID_VFID_SHIFT    8
+#define PXP_CONCRETE_FID_PFID_MASK     0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT    0
+#define PXP_CONCRETE_FID_PORT_MASK     0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT    4
+#define PXP_CONCRETE_FID_PATH_MASK     0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT    6
+#define PXP_CONCRETE_FID_VFVALID_MASK  0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK     0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT    8
 };
 
+/* Concrete Function ID */
 struct pxp_pretend_concrete_fid {
        __le16 fid;
-#define PXP_PRETEND_CONCRETE_FID_PFID_MASK      0xF
-#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT     0
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK  0x7
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK   0x1
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT  7
-#define PXP_PRETEND_CONCRETE_FID_VFID_MASK      0xFF
-#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT     8
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK     0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT    0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT        4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK  0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK     0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT    8
 };
 
+/* Function ID */
 union pxp_pretend_fid {
        struct pxp_pretend_concrete_fid concrete_fid;
-       __le16                          opaque_fid;
+       __le16 opaque_fid;
 };
 
-/* Pxp Pretend Command Register. */
+/* Pxp Pretend Command Register */
 struct pxp_pretend_cmd {
-       union pxp_pretend_fid   fid;
-       __le16                  control;
-#define PXP_PRETEND_CMD_PATH_MASK              0x1
-#define PXP_PRETEND_CMD_PATH_SHIFT             0
-#define PXP_PRETEND_CMD_USE_PORT_MASK          0x1
-#define PXP_PRETEND_CMD_USE_PORT_SHIFT         1
-#define PXP_PRETEND_CMD_PORT_MASK              0x3
-#define PXP_PRETEND_CMD_PORT_SHIFT             2
-#define PXP_PRETEND_CMD_RESERVED0_MASK         0xF
-#define PXP_PRETEND_CMD_RESERVED0_SHIFT        4
-#define PXP_PRETEND_CMD_RESERVED1_MASK         0xF
-#define PXP_PRETEND_CMD_RESERVED1_SHIFT        8
-#define PXP_PRETEND_CMD_PRETEND_PATH_MASK      0x1
-#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT     12
-#define PXP_PRETEND_CMD_PRETEND_PORT_MASK      0x1
-#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT     13
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK  0x1
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
-#define PXP_PRETEND_CMD_IS_CONCRETE_MASK       0x1
-#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT      15
+       union pxp_pretend_fid fid;
+       __le16 control;
+#define PXP_PRETEND_CMD_PATH_MASK              0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT             0
+#define PXP_PRETEND_CMD_USE_PORT_MASK          0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT         1
+#define PXP_PRETEND_CMD_PORT_MASK              0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT             2
+#define PXP_PRETEND_CMD_RESERVED0_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT                4
+#define PXP_PRETEND_CMD_RESERVED1_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT                8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT     12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT     13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK  0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK       0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT      15
 };
 
-/* PTT Record in PXP Admin Window. */
+/* PTT Record in PXP Admin Window */
 struct pxp_ptt_entry {
-       __le32                  offset;
-#define PXP_PTT_ENTRY_OFFSET_MASK     0x7FFFFF
-#define PXP_PTT_ENTRY_OFFSET_SHIFT    0
-#define PXP_PTT_ENTRY_RESERVED0_MASK  0x1FF
-#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
-       struct pxp_pretend_cmd  pretend;
+       __le32 offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK      0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT     0
+#define PXP_PTT_ENTRY_RESERVED0_MASK   0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT  23
+       struct pxp_pretend_cmd pretend;
 };
 
-/* VF Zone A Permission Register. */
+/* VF Zone A Permission Register */
 struct pxp_vf_zone_a_permission {
        __le32 control;
-#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK     0xFF
-#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT    0
-#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK    0x1
-#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT   8
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK        0x7F
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK        0xFFFF
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK             0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT            0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK            0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT           8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK                0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT       9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK                0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT       16
 };
 
-/* RSS hash type */
+/* Rdif context */
 struct rdif_task_context {
        __le32 initial_ref_tag;
        __le16 app_tag_value;
        __le16 app_tag_mask;
        u8 flags0;
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK            0x1
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT           0
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK      0x1
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT     1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK           0x1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT          2
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK         0x1
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT        3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK          0x3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT         4
-#define RDIF_TASK_CONTEXT_CRC_SEED_MASK                0x1
-#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT               6
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK         0x1
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT        7
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK          0x1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT         0
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK   0x1
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT  1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK         0x1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT                2
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK      0x1
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT     3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK         0x3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT                4
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK                        0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT               6
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK      0x1
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT     7
        u8 partial_dif_data[7];
        __le16 partial_crc_value;
        __le16 partial_checksum_value;
        __le32 offset_in_io;
        __le16 flags1;
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK           0x1
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT          0
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK          0x1
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT         1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK          0x1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT         2
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK            0x1
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT           3
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK           0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT          4
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK           0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT          5
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK            0x7
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT           6
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK           0x3
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT          9
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK           0x1
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT          11
-#define RDIF_TASK_CONTEXT_RESERVED0_MASK               0x1
-#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT              12
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK        0x1
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT       13
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK   0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT  14
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK   0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT  15
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK                  0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT                 0
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK                        0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT               1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK                        0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT               2
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK                   0x1
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT                  3
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK                 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT                        4
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK                 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT                        5
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK                   0x7
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT                  6
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK                  0x3
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT                 9
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK                 0x1
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT                        11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK                       0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT                      12
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK               0x1
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT              13
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK       0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT      14
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK       0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT      15
        __le16 state;
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK    0xF
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT   0
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK  0xF
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
-#define RDIF_TASK_CONTEXT_ERRORINIO_MASK               0x1
-#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT              8
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK        0x1
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT       9
-#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK              0xF
-#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT             10
-#define RDIF_TASK_CONTEXT_RESERVED1_MASK               0x3
-#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT              14
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK         0xF
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT                0
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK       0xF
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT      4
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK                     0x1
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT                    8
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK               0x1
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT              9
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK                    0xF
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT                   10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK                       0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT                      14
        __le32 reserved2;
 };
 
-/* RSS hash type */
-enum rss_hash_type {
-       RSS_HASH_TYPE_DEFAULT   = 0,
-       RSS_HASH_TYPE_IPV4      = 1,
-       RSS_HASH_TYPE_TCP_IPV4  = 2,
-       RSS_HASH_TYPE_IPV6      = 3,
-       RSS_HASH_TYPE_TCP_IPV6  = 4,
-       RSS_HASH_TYPE_UDP_IPV4  = 5,
-       RSS_HASH_TYPE_UDP_IPV6  = 6,
-       MAX_RSS_HASH_TYPE
-};
-
-/* status block structure */
-struct status_block {
-       __le16  pi_array[PIS_PER_SB];
+/* Status block structure */
+struct status_block_e4 {
+       __le16  pi_array[PIS_PER_SB_E4];
        __le32  sb_num;
-#define STATUS_BLOCK_SB_NUM_MASK      0x1FF
-#define STATUS_BLOCK_SB_NUM_SHIFT     0
-#define STATUS_BLOCK_ZERO_PAD_MASK    0x7F
-#define STATUS_BLOCK_ZERO_PAD_SHIFT   9
-#define STATUS_BLOCK_ZERO_PAD2_MASK   0xFFFF
-#define STATUS_BLOCK_ZERO_PAD2_SHIFT  16
+#define STATUS_BLOCK_E4_SB_NUM_MASK    0x1FF
+#define STATUS_BLOCK_E4_SB_NUM_SHIFT   0
+#define STATUS_BLOCK_E4_ZERO_PAD_MASK  0x7F
+#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT        16
        __le32 prod_index;
-#define STATUS_BLOCK_PROD_INDEX_MASK  0xFFFFFF
-#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD3_MASK   0xFF
-#define STATUS_BLOCK_ZERO_PAD3_SHIFT  24
+#define STATUS_BLOCK_E4_PROD_INDEX_MASK                0xFFFFFF
+#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT       0
+#define STATUS_BLOCK_E4_ZERO_PAD3_MASK         0xFF
+#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT                24
 };
 
+/* Tdif context */
 struct tdif_task_context {
        __le32 initial_ref_tag;
        __le16 app_tag_value;
        __le16 app_tag_mask;
-       __le16 partial_crc_valueB;
-       __le16 partial_checksum_valueB;
+       __le16 partial_crc_value_b;
+       __le16 partial_checksum_value_b;
        __le16 stateB;
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK    0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT   0
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK  0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
-#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK               0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT              8
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK         0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT        9
-#define TDIF_TASK_CONTEXT_RESERVED0_MASK                0x3F
-#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT               10
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK       0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT      0
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK     0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT    4
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK                   0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT                  8
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK                        0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT               9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK                       0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT                      10
        u8 reserved1;
        u8 flags0;
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK             0x1
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT            0
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK       0x1
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT      1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK            0x1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT           2
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK          0x1
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT         3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK           0x3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT          4
-#define TDIF_TASK_CONTEXT_CRC_SEED_MASK                 0x1
-#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT                6
-#define TDIF_TASK_CONTEXT_RESERVED2_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT               7
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK                  0x1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT                 0
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK           0x1
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT          1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK                 0x1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT                        2
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK              0x1
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT             3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK                 0x3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT                        4
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK                                0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT                       6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK                       0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT                      7
        __le32 flags1;
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK            0x1
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT           0
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK           0x1
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT          1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK           0x1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT          2
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK             0x1
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT            3
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK            0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT           4
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK            0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT           5
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK             0x7
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT            6
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK            0x3
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT           9
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK            0x1
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT           11
-#define TDIF_TASK_CONTEXT_RESERVED3_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT               12
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK         0x1
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT        13
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK    0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT   14
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK  0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
-#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK               0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT              22
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK        0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT       23
-#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK               0xF
-#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT              24
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK    0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT   28
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK    0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT   29
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK          0x1
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT         30
-#define TDIF_TASK_CONTEXT_RESERVED4_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT               31
-       __le32 offset_in_iob;
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK                  0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT                 0
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK                        0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT               1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK                        0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT               2
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK                   0x1
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT                  3
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK                 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT                        4
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK                 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT                        5
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK                   0x7
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT                  6
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK                  0x3
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT                 9
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK                 0x1
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT                        11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK                       0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT                      12
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK               0x1
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT              13
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK       0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT      14
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK     0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT    18
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK                   0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT                  22
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK             0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT            23
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK                    0xF
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT                   24
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK       0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT      28
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK       0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT      29
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK              0x1
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT             30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK                       0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT                      31
+       __le32 offset_in_io_b;
        __le16 partial_crc_value_a;
-       __le16 partial_checksum_valuea_;
-       __le32 offset_in_ioa;
+       __le16 partial_checksum_value_a;
+       __le32 offset_in_io_a;
        u8 partial_dif_data_a[8];
        u8 partial_dif_data_b[8];
 };
 
+/* Timers context */
 struct timers_context {
        __le32 logical_client_0;
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK  0x7FFFFFF
@@ -1385,6 +1378,7 @@ struct timers_context {
 #define TIMERS_CONTEXT_RESERVED7_SHIFT                 29
 };
 
+/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */
 enum tunnel_next_protocol {
        e_unknown = 0,
        e_l2 = 1,
index cb06e6e..9db0285 100644 (file)
 /********************/
 /* ETH FW CONSTANTS */
 /********************/
-#define ETH_HSI_VER_MAJOR                   3
-#define ETH_HSI_VER_MINOR      10
+
+#define ETH_HSI_VER_MAJOR              3
+#define ETH_HSI_VER_MINOR              10
 
 #define ETH_HSI_VER_NO_PKT_LEN_TUNN    5
 
-#define ETH_CACHE_LINE_SIZE                 64
-#define ETH_RX_CQE_GAP 32
-#define ETH_MAX_RAMROD_PER_CON                          8
-#define ETH_TX_BD_PAGE_SIZE_BYTES                       4096
-#define ETH_RX_BD_PAGE_SIZE_BYTES                       4096
-#define ETH_RX_CQE_PAGE_SIZE_BYTES                      4096
-#define ETH_RX_NUM_NEXT_PAGE_BDS                        2
-
-#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET          253
-#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET          251
-
-#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                          1
-#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET                       18
-#define ETH_TX_MAX_BDS_PER_LSO_PACKET  255
-#define ETH_TX_MAX_LSO_HDR_NBD                                          4
-#define ETH_TX_MIN_BDS_PER_LSO_PKT                                      3
-#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT       3
-#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT            2
-#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE          2
-#define ETH_TX_MAX_NON_LSO_PKT_LEN     (9700 - (4 + 4 + 12 + 8))
-#define ETH_TX_MAX_LSO_HDR_BYTES                    510
-#define ETH_TX_LSO_WINDOW_BDS_NUM      (18 - 1)
-#define ETH_TX_LSO_WINDOW_MIN_LEN      9700
-#define ETH_TX_MAX_LSO_PAYLOAD_LEN     0xFE000
-#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES        320
-#define ETH_TX_INACTIVE_SAME_AS_LAST   0xFFFF
-
-#define ETH_NUM_STATISTIC_COUNTERS                      MAX_NUM_VPORTS
+#define ETH_CACHE_LINE_SIZE            64
+#define ETH_RX_CQE_GAP                 32
+#define ETH_MAX_RAMROD_PER_CON         8
+#define ETH_TX_BD_PAGE_SIZE_BYTES      4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES      4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES     4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS       2
+
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET     253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET     251
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                 1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET              18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET                  255
+#define ETH_TX_MAX_LSO_HDR_NBD                         4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT                     3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT      3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT           2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE         2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN             (9700 - (4 + 4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES                       510
+#define ETH_TX_LSO_WINDOW_BDS_NUM                      (18 - 1)
+#define ETH_TX_LSO_WINDOW_MIN_LEN                      9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN                     0xFE000
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES                        320
+#define ETH_TX_INACTIVE_SAME_AS_LAST                   0xFFFF
+
+#define ETH_NUM_STATISTIC_COUNTERS                     MAX_NUM_VPORTS
 #define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
        (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
 #define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
        (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
 
 /* Maximum number of buffers, used for RX packet placement */
-#define ETH_RX_MAX_BUFF_PER_PKT        5
-#define ETH_RX_BD_THRESHOLD    12
+#define ETH_RX_MAX_BUFF_PER_PKT                5
+#define ETH_RX_BD_THRESHOLD            12
 
-/* num of MAC/VLAN filters */
-#define ETH_NUM_MAC_FILTERS                                     512
-#define ETH_NUM_VLAN_FILTERS                            512
+/* Num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS            512
+#define ETH_NUM_VLAN_FILTERS           512
 
-/* approx. multicast constants */
-#define ETH_MULTICAST_BIN_FROM_MAC_SEED     0
-#define ETH_MULTICAST_MAC_BINS                          256
-#define ETH_MULTICAST_MAC_BINS_IN_REGS          (ETH_MULTICAST_MAC_BINS / 32)
+/* Approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED        0
+#define ETH_MULTICAST_MAC_BINS         256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
 
-/*  ethernet vport update constants */
-#define ETH_FILTER_RULES_COUNT                          10
-#define ETH_RSS_IND_TABLE_ENTRIES_NUM           128
-#define ETH_RSS_KEY_SIZE_REGS                       10
-#define ETH_RSS_ENGINE_NUM_K2               207
-#define ETH_RSS_ENGINE_NUM_BB               127
+/* Ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT         10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM  128
+#define ETH_RSS_KEY_SIZE_REGS          10
+#define ETH_RSS_ENGINE_NUM_K2          207
+#define ETH_RSS_ENGINE_NUM_BB          127
 
 /* TPA constants */
-#define ETH_TPA_MAX_AGGS_NUM              64
-#define ETH_TPA_CQE_START_LEN_LIST_SIZE   ETH_RX_MAX_BUFF_PER_PKT
-#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE    6
-#define ETH_TPA_CQE_END_LEN_LIST_SIZE     4
+#define ETH_TPA_MAX_AGGS_NUM           64
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE        ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE  4
 
 /* Control frame check constants */
 #define ETH_CTL_FRAME_ETH_TYPE_NUM     4
 
+/* GFS constants */
+#define ETH_GFT_TRASH_CAN_VPORT                0x1FF
+
+/* Destination port mode */
+enum dest_port_mode {
+       DEST_PORT_PHY,
+       DEST_PORT_LOOPBACK,
+       DEST_PORT_PHY_LOOPBACK,
+       DEST_PORT_DROP,
+       MAX_DEST_PORT_MODE
+};
+
+/* Ethernet address type */
+enum eth_addr_type {
+       BROADCAST_ADDRESS,
+       MULTICAST_ADDRESS,
+       UNICAST_ADDRESS,
+       UNKNOWN_ADDRESS,
+       MAX_ETH_ADDR_TYPE
+};
+
 struct eth_tx_1st_bd_flags {
        u8 bitfields;
-#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK         0x1
-#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT        0
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK  0x1
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK          0x1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT         2
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK          0x1
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT         3
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK   0x1
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT  4
-#define ETH_TX_1ST_BD_FLAGS_LSO_MASK              0x1
-#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT             5
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK     0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT    6
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK     0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT    7
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK              0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT             0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK       0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT      1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK               0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT              2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK               0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT              3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK                0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT       4
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK                   0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT                  5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT         6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT         7
 };
 
-/* The parsing information data fo rthe first tx bd of a given packet. */
+/* The parsing information data fo rthe first tx bd of a given packet */
 struct eth_tx_data_1st_bd {
        __le16 vlan;
        u8 nbds;
        struct eth_tx_1st_bd_flags bd_flags;
        __le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK  0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
-#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK          0x1
-#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT         1
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK    0x3FFF
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT   2
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK      0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT     0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK      0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT     1
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK                0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT       2
 };
 
-/* The parsing information data for the second tx bd of a given packet. */
+/* The parsing information data for the second tx bd of a given packet */
 struct eth_tx_data_2nd_bd {
        __le16 tunn_ip_size;
        __le16  bitfields1;
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK  0xF
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK       0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT      4
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK            0x3
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT           6
-#define ETH_TX_DATA_2ND_BD_START_BD_MASK                  0x1
-#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT                 8
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK                 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT                9
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK           0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT          11
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK                  0x1
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT                 12
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK             0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT            13
-#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK                    0x1
-#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT                   14
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK       0x1
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT      15
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK       0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT      0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK            0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT           4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK                 0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT                        6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK                       0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT                      8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK                      0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT                     9
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK                        0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT               11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK                       0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT                      12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK                  0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT                 13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK                         0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT                                14
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK            0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT           15
        __le16 bitfields2;
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK     0x1FFF
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT    0
-#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK                 0x7
-#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT                13
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK          0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT         0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK                      0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT                     13
 };
 
-/* Firmware data for L2-EDPM packet. */
+/* Firmware data for L2-EDPM packet */
 struct eth_edpm_fw_data {
        struct eth_tx_data_1st_bd data_1st_bd;
        struct eth_tx_data_2nd_bd data_2nd_bd;
        __le32 reserved;
 };
 
-struct eth_fast_path_cqe_fw_debug {
-       __le16 reserved2;
-};
-
-/*  tunneling parsing flags */
+/* Tunneling parsing flags */
 struct eth_tunnel_parsing_flags {
        u8 flags;
 #define        ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK              0x3
@@ -199,24 +217,24 @@ struct eth_tunnel_parsing_flags {
 /* PMD flow control bits */
 struct eth_pmd_flow_flags {
        u8 flags;
-#define ETH_PMD_FLOW_FLAGS_VALID_MASK  0x1
-#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT        1
-#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
-#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK          0x1
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT         0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK         0x1
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT                1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK       0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT      2
 };
 
-/* Regular ETH Rx FP CQE. */
+/* Regular ETH Rx FP CQE */
 struct eth_fast_path_rx_reg_cqe {
        u8 type;
        u8 bitfields;
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK  0x7
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK             0xF
-#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT            3
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK      0x1
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT     7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK    0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT   0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK               0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT              3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK                0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT       7
        __le16 pkt_len;
        struct parsing_and_err_flags pars_flags;
        __le16 vlan_tag;
@@ -225,13 +243,13 @@ struct eth_fast_path_rx_reg_cqe {
        u8 placement_offset;
        struct eth_tunnel_parsing_flags tunnel_pars_flags;
        u8 bd_num;
-       u8 reserved[9];
-       struct eth_fast_path_cqe_fw_debug fw_debug;
-       u8 reserved1[3];
+       u8 reserved;
+       __le16 flow_id;
+       u8 reserved1[11];
        struct eth_pmd_flow_flags pmd_flags;
 };
 
-/* TPA-continue ETH Rx FP CQE. */
+/* TPA-continue ETH Rx FP CQE */
 struct eth_fast_path_rx_tpa_cont_cqe {
        u8 type;
        u8 tpa_agg_index;
@@ -243,7 +261,7 @@ struct eth_fast_path_rx_tpa_cont_cqe {
        struct eth_pmd_flow_flags pmd_flags;
 };
 
-/* TPA-end ETH Rx FP CQE. */
+/* TPA-end ETH Rx FP CQE */
 struct eth_fast_path_rx_tpa_end_cqe {
        u8 type;
        u8 tpa_agg_index;
@@ -259,16 +277,16 @@ struct eth_fast_path_rx_tpa_end_cqe {
        struct eth_pmd_flow_flags pmd_flags;
 };
 
-/* TPA-start ETH Rx FP CQE. */
+/* TPA-start ETH Rx FP CQE */
 struct eth_fast_path_rx_tpa_start_cqe {
        u8 type;
        u8 bitfields;
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK  0x7
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK             0xF
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT            3
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK      0x1
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT     7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK      0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT     0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK                 0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT                        3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK          0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT         7
        __le16 seg_len;
        struct parsing_and_err_flags pars_flags;
        __le16 vlan_tag;
@@ -279,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe {
        u8 tpa_agg_index;
        u8 header_len;
        __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
-       struct eth_fast_path_cqe_fw_debug fw_debug;
+       __le16 flow_id;
        u8 reserved;
        struct eth_pmd_flow_flags pmd_flags;
 };
@@ -295,24 +313,24 @@ struct eth_rx_bd {
        struct regpair addr;
 };
 
-/* regular ETH Rx SP CQE */
+/* Regular ETH Rx SP CQE */
 struct eth_slow_path_rx_cqe {
-       u8      type;
-       u8      ramrod_cmd_id;
-       u8      error_flag;
-       u8      reserved[25];
-       __le16  echo;
-       u8      reserved1;
+       u8 type;
+       u8 ramrod_cmd_id;
+       u8 error_flag;
+       u8 reserved[25];
+       __le16 echo;
+       u8 reserved1;
        struct eth_pmd_flow_flags pmd_flags;
 };
 
-/* union for all ETH Rx CQE types */
+/* Union for all ETH Rx CQE types */
 union eth_rx_cqe {
-       struct eth_fast_path_rx_reg_cqe         fast_path_regular;
-       struct eth_fast_path_rx_tpa_start_cqe   fast_path_tpa_start;
-       struct eth_fast_path_rx_tpa_cont_cqe    fast_path_tpa_cont;
-       struct eth_fast_path_rx_tpa_end_cqe     fast_path_tpa_end;
-       struct eth_slow_path_rx_cqe             slow_path;
+       struct eth_fast_path_rx_reg_cqe fast_path_regular;
+       struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+       struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+       struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
+       struct eth_slow_path_rx_cqe slow_path;
 };
 
 /* ETH Rx CQE type */
@@ -339,7 +357,7 @@ enum eth_rx_tunn_type {
        MAX_ETH_RX_TUNN_TYPE
 };
 
-/*  Aggregation end reason. */
+/* Aggregation end reason. */
 enum eth_tpa_end_reason {
        ETH_AGG_END_UNUSED,
        ETH_AGG_END_SP_UPDATE,
@@ -354,59 +372,59 @@ enum eth_tpa_end_reason {
 
 /* The first tx bd of a given packet */
 struct eth_tx_1st_bd {
-       struct regpair                  addr;
-       __le16                          nbytes;
-       struct eth_tx_data_1st_bd       data;
+       struct regpair addr;
+       __le16 nbytes;
+       struct eth_tx_data_1st_bd data;
 };
 
 /* The second tx bd of a given packet */
 struct eth_tx_2nd_bd {
-       struct regpair                  addr;
-       __le16                          nbytes;
-       struct eth_tx_data_2nd_bd       data;
+       struct regpair addr;
+       __le16 nbytes;
+       struct eth_tx_data_2nd_bd data;
 };
 
-/* The parsing information data for the third tx bd of a given packet. */
+/* The parsing information data for the third tx bd of a given packet */
 struct eth_tx_data_3rd_bd {
        __le16 lso_mss;
        __le16 bitfields;
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK  0xF
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK         0xF
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT        4
-#define ETH_TX_DATA_3RD_BD_START_BD_MASK        0x1
-#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT       8
-#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK       0x7F
-#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT      9
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT        0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK                0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT       4
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK       0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT      8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK      0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT     9
        u8 tunn_l4_hdr_start_offset_w;
        u8 tunn_hdr_size_w;
 };
 
 /* The third tx bd of a given packet */
 struct eth_tx_3rd_bd {
-       struct regpair                  addr;
-       __le16                          nbytes;
-       struct eth_tx_data_3rd_bd       data;
+       struct regpair addr;
+       __le16 nbytes;
+       struct eth_tx_data_3rd_bd data;
 };
 
-/* Complementary information for the regular tx bd of a given packet. */
+/* Complementary information for the regular tx bd of a given packet */
 struct eth_tx_data_bd {
-       __le16  reserved0;
-       __le16  bitfields;
-#define ETH_TX_DATA_BD_RESERVED1_MASK  0xFF
-#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
-#define ETH_TX_DATA_BD_START_BD_MASK   0x1
-#define ETH_TX_DATA_BD_START_BD_SHIFT  8
-#define ETH_TX_DATA_BD_RESERVED2_MASK  0x7F
-#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+       __le16 reserved0;
+       __le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK  0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+#define ETH_TX_DATA_BD_START_BD_MASK   0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT  8
+#define ETH_TX_DATA_BD_RESERVED2_MASK  0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
        __le16 reserved3;
 };
 
 /* The common non-special TX BD ring element */
 struct eth_tx_bd {
-       struct regpair  addr;
-       __le16          nbytes;
-       struct eth_tx_data_bd   data;
+       struct regpair addr;
+       __le16 nbytes;
+       struct eth_tx_data_bd data;
 };
 
 union eth_tx_bd_types {
@@ -434,18 +452,30 @@ struct xstorm_eth_queue_zone {
 /* ETH doorbell data */
 struct eth_db_data {
        u8 params;
-#define ETH_DB_DATA_DEST_MASK         0x3
-#define ETH_DB_DATA_DEST_SHIFT        0
-#define ETH_DB_DATA_AGG_CMD_MASK      0x3
-#define ETH_DB_DATA_AGG_CMD_SHIFT     2
-#define ETH_DB_DATA_BYPASS_EN_MASK    0x1
-#define ETH_DB_DATA_BYPASS_EN_SHIFT   4
-#define ETH_DB_DATA_RESERVED_MASK     0x1
-#define ETH_DB_DATA_RESERVED_SHIFT    5
-#define ETH_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ETH_DB_DATA_DEST_MASK          0x3
+#define ETH_DB_DATA_DEST_SHIFT         0
+#define ETH_DB_DATA_AGG_CMD_MASK       0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT      2
+#define ETH_DB_DATA_BYPASS_EN_MASK     0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT    4
+#define ETH_DB_DATA_RESERVED_MASK      0x1
+#define ETH_DB_DATA_RESERVED_SHIFT     5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK   0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT  6
        u8 agg_flags;
        __le16 bd_prod;
 };
 
+/* RSS hash type */
+enum rss_hash_type {
+       RSS_HASH_TYPE_DEFAULT = 0,
+       RSS_HASH_TYPE_IPV4 = 1,
+       RSS_HASH_TYPE_TCP_IPV4 = 2,
+       RSS_HASH_TYPE_IPV6 = 3,
+       RSS_HASH_TYPE_TCP_IPV6 = 4,
+       RSS_HASH_TYPE_UDP_IPV4 = 5,
+       RSS_HASH_TYPE_UDP_IPV6 = 6,
+       MAX_RSS_HASH_TYPE
+};
+
 #endif /* __ETH_COMMON__ */
index 12fc9e7..22077c5 100644 (file)
 
 #ifndef __FCOE_COMMON__
 #define __FCOE_COMMON__
+
 /*********************/
 /* FCOE FW CONSTANTS */
 /*********************/
 
 #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN  12
 
-struct fcoe_abts_pkt {
-       __le32 abts_rsp_fc_payload_lo;
-       __le16 abts_rsp_rx_id;
-       u8 abts_rsp_rctl;
-       u8 reserved2;
-};
-
-/* FCoE additional WQE (Sq/XferQ) information */
-union fcoe_additional_info_union {
-       __le32 previous_tid;
-       __le32 parent_tid;
-       __le32 burst_length;
-       __le32 seq_rec_updated_offset;
-};
-
-struct fcoe_exp_ro {
-       __le32 data_offset;
-       __le32 reserved;
-};
-
-union fcoe_cleanup_addr_exp_ro_union {
-       struct regpair abts_rsp_fc_payload_hi;
-       struct fcoe_exp_ro exp_ro;
-};
-
-/* FCoE Ramrod Command IDs */
-enum fcoe_completion_status {
-       FCOE_COMPLETION_STATUS_SUCCESS,
-       FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
-       FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
-       MAX_FCOE_COMPLETION_STATUS
-};
-
-struct fc_addr_nw {
-       u8 addr_lo;
-       u8 addr_mid;
-       u8 addr_hi;
-};
-
-/* FCoE connection offload */
-struct fcoe_conn_offload_ramrod_data {
-       struct regpair sq_pbl_addr;
-       struct regpair sq_curr_page_addr;
-       struct regpair sq_next_page_addr;
-       struct regpair xferq_pbl_addr;
-       struct regpair xferq_curr_page_addr;
-       struct regpair xferq_next_page_addr;
-       struct regpair respq_pbl_addr;
-       struct regpair respq_curr_page_addr;
-       struct regpair respq_next_page_addr;
-       __le16 dst_mac_addr_lo;
-       __le16 dst_mac_addr_mid;
-       __le16 dst_mac_addr_hi;
-       __le16 src_mac_addr_lo;
-       __le16 src_mac_addr_mid;
-       __le16 src_mac_addr_hi;
-       __le16 tx_max_fc_pay_len;
-       __le16 e_d_tov_timer_val;
-       __le16 rx_max_fc_pay_len;
-       __le16 vlan_tag;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK              0xFFF
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT             0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK                  0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT                 12
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK             0x7
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT            13
-       __le16 physical_q0;
-       __le16 rec_rr_tov_timer_val;
-       struct fc_addr_nw s_id;
-       u8 max_conc_seqs_c3;
-       struct fc_addr_nw d_id;
-       u8 flags;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK  0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK           0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT          1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK          0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT         2
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK          0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT         3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK                 0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT                4
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK            0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT           6
-       __le16 conn_id;
-       u8 def_q_idx;
-       u8 reserved[5];
-};
-
-/* FCoE terminate connection request */
-struct fcoe_conn_terminate_ramrod_data {
-       struct regpair terminate_params_addr;
-};
-
-struct fcoe_slow_sgl_ctx {
-       struct regpair base_sgl_addr;
-       __le16 curr_sge_off;
-       __le16 remainder_num_sges;
-       __le16 curr_sgl_index;
-       __le16 reserved;
-};
-
-union fcoe_dix_desc_ctx {
-       struct fcoe_slow_sgl_ctx dix_sgl;
-       struct scsi_sge cached_dix_sge;
+/* The fcoe storm task context protection-information of Ystorm */
+struct protection_info_ctx {
+       __le16 flags;
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK                0x3
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT       0
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK           0x1
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT          2
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK  0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK     0xF
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT    4
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK  0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
+#define PROTECTION_INFO_CTX_RESERVED0_MASK             0x7F
+#define PROTECTION_INFO_CTX_RESERVED0_SHIFT            9
+       u8 dix_block_size;
+       u8 dst_size;
 };
 
-struct fcoe_fast_sgl_ctx {
-       struct regpair sgl_start_addr;
-       __le32 sgl_byte_offset;
-       __le16 task_reuse_cnt;
-       __le16 init_offset_in_first_sge;
+/* The fcoe storm task context protection-information of Ystorm */
+union protection_info_union_ctx {
+       struct protection_info_ctx info;
+       __le32 value;
 };
 
+/* FCP CMD payload */
 struct fcoe_fcp_cmd_payload {
        __le32 opaque[8];
 };
 
+/* FCP RSP payload */
 struct fcoe_fcp_rsp_payload {
        __le32 opaque[6];
 };
 
-struct fcoe_fcp_xfer_payload {
-       __le32 opaque[3];
-};
-
-/* FCoE firmware function init */
-struct fcoe_init_func_ramrod_data {
-       struct scsi_init_func_params func_params;
-       struct scsi_init_func_queues q_params;
-       __le16 mtu;
-       __le16 sq_num_pages_in_pbl;
-       __le32 reserved;
-};
-
-/* FCoE: Mode of the connection: Target or Initiator or both */
-enum fcoe_mode_type {
-       FCOE_INITIATOR_MODE = 0x0,
-       FCOE_TARGET_MODE = 0x1,
-       FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
-       MAX_FCOE_MODE_TYPE
-};
-
-struct fcoe_rx_stat {
-       struct regpair fcoe_rx_byte_cnt;
-       struct regpair fcoe_rx_data_pkt_cnt;
-       struct regpair fcoe_rx_xfer_pkt_cnt;
-       struct regpair fcoe_rx_other_pkt_cnt;
-       __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
-       __le32 fcoe_silent_drop_pkt_rq_full_cnt;
-       __le32 fcoe_silent_drop_pkt_crc_error_cnt;
-       __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
-       __le32 fcoe_silent_drop_total_pkt_cnt;
-       __le32 rsrv;
-};
-
-struct fcoe_stat_ramrod_data {
-       struct regpair stat_params_addr;
-};
-
-struct protection_info_ctx {
-       __le16 flags;
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK        0x3
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT       0
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK           0x1
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT          2
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK  0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK     0xF
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT    4
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK  0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
-#define PROTECTION_INFO_CTX_RESERVED0_MASK             0x7F
-#define PROTECTION_INFO_CTX_RESERVED0_SHIFT            9
-       u8 dix_block_size;
-       u8 dst_size;
-};
-
-union protection_info_union_ctx {
-       struct protection_info_ctx info;
-       __le32 value;
-};
-
+/* FCP RSP payload */
 struct fcp_rsp_payload_padded {
        struct fcoe_fcp_rsp_payload rsp_payload;
        __le32 reserved[2];
 };
 
+/* FCP RSP payload */
+struct fcoe_fcp_xfer_payload {
+       __le32 opaque[3];
+};
+
+/* FCP RSP payload */
 struct fcp_xfer_payload_padded {
        struct fcoe_fcp_xfer_payload xfer_payload;
        __le32 reserved[5];
 };
 
+/* Task params */
 struct fcoe_tx_data_params {
        __le32 data_offset;
        __le32 offset_in_io;
        u8 flags;
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK  0x1
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK           0x1
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT          1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK       0x1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT      2
-#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK           0x1F
-#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT          3
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK    0x1
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT   0
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK             0x1
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT            1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK         0x1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT                2
+#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK             0x1F
+#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT            3
        u8 dif_residual;
        __le16 seq_cnt;
        __le16 single_sge_saved_offset;
@@ -227,6 +88,7 @@ struct fcoe_tx_data_params {
        __le16 reserved3;
 };
 
+/* Middle path parameters: FC header fields provided by the driver */
 struct fcoe_tx_mid_path_params {
        __le32 parameter;
        u8 r_ctl;
@@ -237,11 +99,13 @@ struct fcoe_tx_mid_path_params {
        __le16 ox_id;
 };
 
+/* Task params */
 struct fcoe_tx_params {
        struct fcoe_tx_data_params data;
        struct fcoe_tx_mid_path_params mid_path;
 };
 
+/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */
 union fcoe_tx_info_union_ctx {
        struct fcoe_fcp_cmd_payload fcp_cmd_payload;
        struct fcp_rsp_payload_padded fcp_rsp_payload;
@@ -249,13 +113,29 @@ union fcoe_tx_info_union_ctx {
        struct fcoe_tx_params tx_params;
 };
 
+/* Data sgl */
+struct fcoe_slow_sgl_ctx {
+       struct regpair base_sgl_addr;
+       __le16 curr_sge_off;
+       __le16 remainder_num_sges;
+       __le16 curr_sgl_index;
+       __le16 reserved;
+};
+
+/* Union of DIX SGL \ cached DIX sges */
+union fcoe_dix_desc_ctx {
+       struct fcoe_slow_sgl_ctx dix_sgl;
+       struct scsi_sge cached_dix_sge;
+};
+
+/* The fcoe storm task context of Ystorm */
 struct ystorm_fcoe_task_st_ctx {
        u8 task_type;
        u8 sgl_mode;
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK  0x1
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK         0x7F
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT        1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK       0x1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT      0
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK              0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT             1
        u8 cached_dix_sge;
        u8 expect_first_xfer;
        __le32 num_pbf_zero_write;
@@ -272,49 +152,49 @@ struct ystorm_fcoe_task_st_ctx {
        u8 reserved2[8];
 };
 
-struct ystorm_fcoe_task_ag_ctx {
+struct e4_ystorm_fcoe_task_ag_ctx {
        u8 byte0;
        u8 byte1;
        __le16 word0;
        u8 flags0;
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK     0xF
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT    0
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT       4
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT       5
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT       6
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT       7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK                0xF
+#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT       0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK           0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT          4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK           0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT          5
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK           0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT          6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK           0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT          7
        u8 flags1;
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK         0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT        0
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK         0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT        2
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK  0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK       0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT      6
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK       0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT      7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK            0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT           0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK            0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT           2
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK     0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT    4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK          0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT         6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK          0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT         7
        u8 flags2;
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT       0
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT    1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT    2
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT    3
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT    4
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT    5
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT    6
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT    7
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK           0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT          0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK                0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT       1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK                0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT       2
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK                0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT       3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK                0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT       4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK                0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT       5
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK                0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT       6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK                0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT       7
        u8 byte2;
        __le32 reg0;
        u8 byte3;
@@ -328,73 +208,73 @@ struct ystorm_fcoe_task_ag_ctx {
        __le32 reg2;
 };
 
-struct tstorm_fcoe_task_ag_ctx {
+struct e4_tstorm_fcoe_task_ag_ctx {
        u8 reserved;
        u8 byte1;
        __le16 icid;
        u8 flags0;
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK     0xF
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT    0
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT       4
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK                0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT               5
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK     0x1
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT    6
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK               0x1
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT              7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK                0xF
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT       0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK           0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT          4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK                   0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT                  5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK                0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT       6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK                  0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT                 7
        u8 flags1;
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK        0x1
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT       0
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK                0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT               1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK       0x3
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT      2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK           0x3
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT          4
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK                 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT                6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK   0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT  0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK           0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT          1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK  0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK      0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT     4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK            0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT           6
        u8 flags2;
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK      0x3
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT     0
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK       0x3
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT      2
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK         0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT        4
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK     0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT    6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK         0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT                0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK          0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT         2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK            0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT           4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK                0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT       6
        u8 flags3;
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK       0x3
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT      0
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK    0x1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT   2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK        0x1
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT       3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK               0x1
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT              4
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK   0x1
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT  5
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK    0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT   6
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK      0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT     7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK          0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT         0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK       0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT      2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK           0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT          3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK                  0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT                 4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK      0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT     5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK       0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT      6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK         0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT                7
        u8 flags4;
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK  0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK    0x1
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT   1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT            2
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT            3
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT            4
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT            5
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT            6
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT            7
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK     0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT    0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK       0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT      1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK                        0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT               2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK                        0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT               3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK                        0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT               4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK                        0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT               5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK                        0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT               6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK                        0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT               7
        u8 cleanup_state;
        __le16 last_sent_tid;
        __le32 rec_rr_tov_exp_timeout;
@@ -407,25 +287,46 @@ struct tstorm_fcoe_task_ag_ctx {
        __le32 data_offset_next;
 };
 
+/* Cached data sges */
+struct fcoe_exp_ro {
+       __le32 data_offset;
+       __le32 reserved;
+};
+
+/* Union of cleanup address / expected relative offsets */
+union fcoe_cleanup_addr_exp_ro_union {
+       struct regpair abts_rsp_fc_payload_hi;
+       struct fcoe_exp_ro exp_ro;
+};
+
+/* Fields copied from the ABTS rsp packet */
+struct fcoe_abts_pkt {
+       __le32 abts_rsp_fc_payload_lo;
+       __le16 abts_rsp_rx_id;
+       u8 abts_rsp_rctl;
+       u8 reserved2;
+};
+
+/* FW read-write (modifiable) part of the FCoE task storm context of Tstorm */
 struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
        union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
        __le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK       0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT      0
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK   0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT  1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK        0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT       2
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK       0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT      3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK  0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK   0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT  5
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK        0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT       6
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK             0xFF
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT            8
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK       0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT      0
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK   0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT  1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK                0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT       2
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK       0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT      3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK  0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK   0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT  5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK                0x3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT       6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK             0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT            8
        __le16 seq_cnt;
        u8 seq_id;
        u8 ooo_rx_seq_id;
@@ -436,6 +337,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
        __le16 reserved1;
 };
 
+/* FW read-only part of the FCoE task storm context of Tstorm */
 struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
        u8 task_type;
        u8 dev_type;
@@ -446,54 +348,55 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
        __le32 rsrv;
 };
 
+/* The FCoE task storm context of Tstorm */
 struct tstorm_fcoe_task_st_ctx {
        struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
        struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
 };
 
-struct mstorm_fcoe_task_ag_ctx {
+struct e4_mstorm_fcoe_task_ag_ctx {
        u8 byte0;
        u8 byte1;
        __le16 icid;
        u8 flags0;
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK    0xF
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT   0
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK       0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT      4
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK         0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT        5
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK               0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT              6
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK               0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT              7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK                0xF
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT       0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK           0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT          4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK             0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT            5
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK                   0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT                  6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK                   0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT                  7
        u8 flags1;
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK      0x3
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT     0
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK                0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT               2
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK                0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT               4
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK   0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT  6
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK              0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT             7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK          0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT         0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK                    0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT                   2
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK                    0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT                   4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK       0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT      6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK                  0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT                 7
        u8 flags2;
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK              0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT             0
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT           1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT           2
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT           3
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT           4
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT           5
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK  0x1
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT           7
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK                  0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT                 0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK                        0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT               1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK                        0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT               2
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK                        0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT               3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK                        0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT               4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK                        0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT               5
+#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK      0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT     6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK                        0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT               7
        u8 cleanup_state;
        __le32 received_bytes;
        u8 byte3;
@@ -507,6 +410,7 @@ struct mstorm_fcoe_task_ag_ctx {
        __le32 reg2;
 };
 
+/* The fcoe task storm context of Mstorm */
 struct mstorm_fcoe_task_st_ctx {
        struct regpair rsp_buf_addr;
        __le32 rsrv[2];
@@ -515,79 +419,79 @@ struct mstorm_fcoe_task_st_ctx {
        __le32 data_buffer_offset;
        __le16 parent_id;
        __le16 flags;
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK     0xF
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT    0
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK        0x3
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT       4
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK           0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT          6
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK  0x1
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK        0x3
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT       8
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK  0x1
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK    0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT   11
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK         0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT        12
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK           0x1
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT          13
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK              0x3
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT             14
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK         0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT                0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK            0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT           4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK               0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT              6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK      0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT     7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK            0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT           8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK      0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT     10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK                0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT       11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK             0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT            12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK               0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT              13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK                  0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT                 14
        struct scsi_cached_sges data_desc;
 };
 
-struct ustorm_fcoe_task_ag_ctx {
+struct e4_ustorm_fcoe_task_ag_ctx {
        u8 reserved;
        u8 byte1;
        __le16 icid;
        u8 flags0;
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK             0x1
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT            5
-#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT             6
+#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK                0xF
+#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT       0
+#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK           0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT          4
+#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK                   0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT                  5
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK                    0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT                   6
        u8 flags1;
-#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT             0
-#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT             2
-#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT             4
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK     0x3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT    6
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK            0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT           0
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK            0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT           2
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK            0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT           4
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK   0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT  6
        u8 flags2;
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT           0
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT           1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT           2
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT           3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK  0x1
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT         5
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT         6
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT         7
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK                  0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT                 0
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK                  0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT                 1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK                  0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT                 2
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK                  0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT                 3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK                0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT       4
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK                        0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT               5
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK                        0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT               6
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK                        0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT               7
        u8 flags3;
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT         0
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT         1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT         2
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT         3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK   0xF
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT  4
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK                0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT       0
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK                0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT       1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK                0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT       2
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK                0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT       3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT        4
        __le32 dif_err_intervals;
        __le32 dif_error_1st_interval;
        __le32 global_cq_num;
@@ -596,21 +500,189 @@ struct ustorm_fcoe_task_ag_ctx {
        __le32 reg5;
 };
 
-struct fcoe_task_context {
+/* FCoE task context */
+struct e4_fcoe_task_context {
        struct ystorm_fcoe_task_st_ctx ystorm_st_context;
        struct regpair ystorm_st_padding[2];
        struct tdif_task_context tdif_context;
-       struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
-       struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+       struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+       struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context;
        struct timers_context timer_context;
        struct tstorm_fcoe_task_st_ctx tstorm_st_context;
        struct regpair tstorm_st_padding[2];
-       struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+       struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context;
        struct mstorm_fcoe_task_st_ctx mstorm_st_context;
-       struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+       struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context;
        struct rdif_task_context rdif_context;
 };
 
+/* FCoE additional WQE (Sq/XferQ) information */
+union fcoe_additional_info_union {
+       __le32 previous_tid;
+       __le32 parent_tid;
+       __le32 burst_length;
+       __le32 seq_rec_updated_offset;
+};
+
+/* FCoE Ramrod Command IDs */
+enum fcoe_completion_status {
+       FCOE_COMPLETION_STATUS_SUCCESS,
+       FCOE_COMPLETION_STATUS_FCOE_VER_ERR,
+       FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR,
+       MAX_FCOE_COMPLETION_STATUS
+};
+
+/* FC address (SID/DID) network presentation */
+struct fc_addr_nw {
+       u8 addr_lo;
+       u8 addr_mid;
+       u8 addr_hi;
+};
+
+/* FCoE connection offload */
+struct fcoe_conn_offload_ramrod_data {
+       struct regpair sq_pbl_addr;
+       struct regpair sq_curr_page_addr;
+       struct regpair sq_next_page_addr;
+       struct regpair xferq_pbl_addr;
+       struct regpair xferq_curr_page_addr;
+       struct regpair xferq_next_page_addr;
+       struct regpair respq_pbl_addr;
+       struct regpair respq_curr_page_addr;
+       struct regpair respq_next_page_addr;
+       __le16 dst_mac_addr_lo;
+       __le16 dst_mac_addr_mid;
+       __le16 dst_mac_addr_hi;
+       __le16 src_mac_addr_lo;
+       __le16 src_mac_addr_mid;
+       __le16 src_mac_addr_hi;
+       __le16 tx_max_fc_pay_len;
+       __le16 e_d_tov_timer_val;
+       __le16 rx_max_fc_pay_len;
+       __le16 vlan_tag;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK     0xFFF
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT    0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK         0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT                12
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK    0x7
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT   13
+       __le16 physical_q0;
+       __le16 rec_rr_tov_timer_val;
+       struct fc_addr_nw s_id;
+       u8 max_conc_seqs_c3;
+       struct fc_addr_nw d_id;
+       u8 flags;
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT        0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK          0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT         1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK         0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT                2
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK         0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT                3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK       0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT      4
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK                        0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT               5
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK           0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT          7
+       __le16 conn_id;
+       u8 def_q_idx;
+       u8 reserved[5];
+};
+
+/* FCoE terminate connection request */
+struct fcoe_conn_terminate_ramrod_data {
+       struct regpair terminate_params_addr;
+};
+
+/* FCoE device type */
+enum fcoe_device_type {
+       FCOE_TASK_DEV_TYPE_DISK,
+       FCOE_TASK_DEV_TYPE_TAPE,
+       MAX_FCOE_DEVICE_TYPE
+};
+
+/* Data sgl */
+struct fcoe_fast_sgl_ctx {
+       struct regpair sgl_start_addr;
+       __le32 sgl_byte_offset;
+       __le16 task_reuse_cnt;
+       __le16 init_offset_in_first_sge;
+};
+
+/* FCoE firmware function init */
+struct fcoe_init_func_ramrod_data {
+       struct scsi_init_func_params func_params;
+       struct scsi_init_func_queues q_params;
+       __le16 mtu;
+       __le16 sq_num_pages_in_pbl;
+       __le32 reserved[3];
+};
+
+/* FCoE: Mode of the connection: Target or Initiator or both */
+enum fcoe_mode_type {
+       FCOE_INITIATOR_MODE = 0x0,
+       FCOE_TARGET_MODE = 0x1,
+       FCOE_BOTH_OR_NOT_CHOSEN = 0x3,
+       MAX_FCOE_MODE_TYPE
+};
+
+/* Per PF FCoE receive path statistics - tStorm RAM structure */
+struct fcoe_rx_stat {
+       struct regpair fcoe_rx_byte_cnt;
+       struct regpair fcoe_rx_data_pkt_cnt;
+       struct regpair fcoe_rx_xfer_pkt_cnt;
+       struct regpair fcoe_rx_other_pkt_cnt;
+       __le32 fcoe_silent_drop_pkt_cmdq_full_cnt;
+       __le32 fcoe_silent_drop_pkt_rq_full_cnt;
+       __le32 fcoe_silent_drop_pkt_crc_error_cnt;
+       __le32 fcoe_silent_drop_pkt_task_invalid_cnt;
+       __le32 fcoe_silent_drop_total_pkt_cnt;
+       __le32 rsrv;
+};
+
+/* FCoE SQE request type */
+enum fcoe_sqe_request_type {
+       SEND_FCOE_CMD,
+       SEND_FCOE_MIDPATH,
+       SEND_FCOE_ABTS_REQUEST,
+       FCOE_EXCHANGE_CLEANUP,
+       FCOE_SEQUENCE_RECOVERY,
+       SEND_FCOE_XFER_RDY,
+       SEND_FCOE_RSP,
+       SEND_FCOE_RSP_WITH_SENSE_DATA,
+       SEND_FCOE_TARGET_DATA,
+       SEND_FCOE_INITIATOR_DATA,
+       SEND_FCOE_XFER_CONTINUATION_RDY,
+       SEND_FCOE_TARGET_ABTS_RSP,
+       MAX_FCOE_SQE_REQUEST_TYPE
+};
+
+/* FCoE statistics request */
+struct fcoe_stat_ramrod_data {
+       struct regpair stat_params_addr;
+};
+
+/* FCoE task type */
+enum fcoe_task_type {
+       FCOE_TASK_TYPE_WRITE_INITIATOR,
+       FCOE_TASK_TYPE_READ_INITIATOR,
+       FCOE_TASK_TYPE_MIDPATH,
+       FCOE_TASK_TYPE_UNSOLICITED,
+       FCOE_TASK_TYPE_ABTS,
+       FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
+       FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
+       FCOE_TASK_TYPE_WRITE_TARGET,
+       FCOE_TASK_TYPE_READ_TARGET,
+       FCOE_TASK_TYPE_RSP,
+       FCOE_TASK_TYPE_RSP_SENSE_DATA,
+       FCOE_TASK_TYPE_ABTS_TARGET,
+       FCOE_TASK_TYPE_ENUM_SIZE,
+       MAX_FCOE_TASK_TYPE
+};
+
+/* Per PF FCoE transmit path statistics - pStorm RAM structure */
 struct fcoe_tx_stat {
        struct regpair fcoe_tx_byte_cnt;
        struct regpair fcoe_tx_data_pkt_cnt;
@@ -618,51 +690,55 @@ struct fcoe_tx_stat {
        struct regpair fcoe_tx_other_pkt_cnt;
 };
 
+/* FCoE SQ/XferQ element */
 struct fcoe_wqe {
        __le16 task_id;
        __le16 flags;
-#define FCOE_WQE_REQ_TYPE_MASK       0xF
-#define FCOE_WQE_REQ_TYPE_SHIFT      0
-#define FCOE_WQE_SGL_MODE_MASK       0x1
-#define FCOE_WQE_SGL_MODE_SHIFT      4
-#define FCOE_WQE_CONTINUATION_MASK   0x1
-#define FCOE_WQE_CONTINUATION_SHIFT  5
-#define FCOE_WQE_SEND_AUTO_RSP_MASK  0x1
-#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
-#define FCOE_WQE_RESERVED_MASK       0x1
-#define FCOE_WQE_RESERVED_SHIFT      7
-#define FCOE_WQE_NUM_SGES_MASK       0xF
-#define FCOE_WQE_NUM_SGES_SHIFT      8
-#define FCOE_WQE_RESERVED1_MASK      0xF
-#define FCOE_WQE_RESERVED1_SHIFT     12
+#define FCOE_WQE_REQ_TYPE_MASK         0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT                0
+#define FCOE_WQE_SGL_MODE_MASK         0x1
+#define FCOE_WQE_SGL_MODE_SHIFT                4
+#define FCOE_WQE_CONTINUATION_MASK     0x1
+#define FCOE_WQE_CONTINUATION_SHIFT    5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK    0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT   6
+#define FCOE_WQE_RESERVED_MASK         0x1
+#define FCOE_WQE_RESERVED_SHIFT                7
+#define FCOE_WQE_NUM_SGES_MASK         0xF
+#define FCOE_WQE_NUM_SGES_SHIFT                8
+#define FCOE_WQE_RESERVED1_MASK                0xF
+#define FCOE_WQE_RESERVED1_SHIFT       12
        union fcoe_additional_info_union additional_info_union;
 };
 
+/* FCoE XFRQ element */
 struct xfrqe_prot_flags {
        u8 flags;
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK  0xF
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK             0x1
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT            4
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK          0x3
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT         5
-#define XFRQE_PROT_FLAGS_RESERVED_MASK                0x1
-#define XFRQE_PROT_FLAGS_RESERVED_SHIFT               7
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK   0xF
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT  0
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK              0x1
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT             4
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK           0x3
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT          5
+#define XFRQE_PROT_FLAGS_RESERVED_MASK                 0x1
+#define XFRQE_PROT_FLAGS_RESERVED_SHIFT                        7
 };
 
+/* FCoE doorbell data */
 struct fcoe_db_data {
        u8 params;
-#define FCOE_DB_DATA_DEST_MASK         0x3
-#define FCOE_DB_DATA_DEST_SHIFT        0
-#define FCOE_DB_DATA_AGG_CMD_MASK      0x3
-#define FCOE_DB_DATA_AGG_CMD_SHIFT     2
-#define FCOE_DB_DATA_BYPASS_EN_MASK    0x1
-#define FCOE_DB_DATA_BYPASS_EN_SHIFT   4
-#define FCOE_DB_DATA_RESERVED_MASK     0x1
-#define FCOE_DB_DATA_RESERVED_SHIFT    5
-#define FCOE_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define FCOE_DB_DATA_DEST_MASK         0x3
+#define FCOE_DB_DATA_DEST_SHIFT                0
+#define FCOE_DB_DATA_AGG_CMD_MASK      0x3
+#define FCOE_DB_DATA_AGG_CMD_SHIFT     2
+#define FCOE_DB_DATA_BYPASS_EN_MASK    0x1
+#define FCOE_DB_DATA_BYPASS_EN_SHIFT   4
+#define FCOE_DB_DATA_RESERVED_MASK     0x1
+#define FCOE_DB_DATA_RESERVED_SHIFT    5
+#define FCOE_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
        u8 agg_flags;
        __le16 sq_prod;
 };
+
 #endif /* __FCOE_COMMON__ */
index 85e086c..4cc9b37 100644 (file)
 
 #ifndef __ISCSI_COMMON__
 #define __ISCSI_COMMON__
+
 /**********************/
 /* ISCSI FW CONSTANTS */
 /**********************/
 
 /* iSCSI HSI constants */
-#define ISCSI_DEFAULT_MTU       (1500)
+#define ISCSI_DEFAULT_MTU      (1500)
 
 /* KWQ (kernel work queue) layer codes */
-#define ISCSI_SLOW_PATH_LAYER_CODE   (6)
+#define ISCSI_SLOW_PATH_LAYER_CODE     (6)
 
 /* iSCSI parameter defaults */
-#define ISCSI_DEFAULT_HEADER_DIGEST         (0)
-#define ISCSI_DEFAULT_DATA_DIGEST           (0)
-#define ISCSI_DEFAULT_INITIAL_R2T           (1)
-#define ISCSI_DEFAULT_IMMEDIATE_DATA        (1)
-#define ISCSI_DEFAULT_MAX_PDU_LENGTH        (0x2000)
-#define ISCSI_DEFAULT_FIRST_BURST_LENGTH    (0x10000)
-#define ISCSI_DEFAULT_MAX_BURST_LENGTH      (0x40000)
-#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T   (1)
+#define ISCSI_DEFAULT_HEADER_DIGEST            (0)
+#define ISCSI_DEFAULT_DATA_DIGEST              (0)
+#define ISCSI_DEFAULT_INITIAL_R2T              (1)
+#define ISCSI_DEFAULT_IMMEDIATE_DATA           (1)
+#define ISCSI_DEFAULT_MAX_PDU_LENGTH           (0x2000)
+#define ISCSI_DEFAULT_FIRST_BURST_LENGTH       (0x10000)
+#define ISCSI_DEFAULT_MAX_BURST_LENGTH         (0x40000)
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T      (1)
 
 /* iSCSI parameter limits */
-#define ISCSI_MIN_VAL_MAX_PDU_LENGTH        (0x200)
-#define ISCSI_MAX_VAL_MAX_PDU_LENGTH        (0xffffff)
-#define ISCSI_MIN_VAL_BURST_LENGTH          (0x200)
-#define ISCSI_MAX_VAL_BURST_LENGTH          (0xffffff)
-#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T   (1)
-#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T   (0xff)
+#define ISCSI_MIN_VAL_MAX_PDU_LENGTH           (0x200)
+#define ISCSI_MAX_VAL_MAX_PDU_LENGTH           (0xffffff)
+#define ISCSI_MIN_VAL_BURST_LENGTH             (0x200)
+#define ISCSI_MAX_VAL_BURST_LENGTH             (0xffffff)
+#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T      (1)
+#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T      (0xff)
 
-#define ISCSI_AHS_CNTL_SIZE 4
+#define ISCSI_AHS_CNTL_SIZE    4
 
-#define ISCSI_WQE_NUM_SGES_SLOWIO           (0xf)
+#define ISCSI_WQE_NUM_SGES_SLOWIO      (0xf)
 
 /* iSCSI reserved params */
 #define ISCSI_ITT_ALL_ONES     (0xffffffff)
 #define ISCSI_TTT_ALL_ONES     (0xffffffff)
 
-#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
-#define ISCSI_OPTION_2_ON_CHIP_TCP 2
+#define ISCSI_OPTION_1_OFF_CHIP_TCP    1
+#define ISCSI_OPTION_2_ON_CHIP_TCP     2
 
-#define ISCSI_INITIATOR_MODE 0
-#define ISCSI_TARGET_MODE 1
+#define ISCSI_INITIATOR_MODE   0
+#define ISCSI_TARGET_MODE      1
 
 /* iSCSI request op codes */
 #define ISCSI_OPCODE_NOP_OUT           (0)
 #define ISCSI_OPCODE_LOGOUT_REQUEST    (6)
 
 /* iSCSI response/messages op codes */
-#define ISCSI_OPCODE_NOP_IN             (0x20)
-#define ISCSI_OPCODE_SCSI_RESPONSE      (0x21)
-#define ISCSI_OPCODE_TMF_RESPONSE       (0x22)
-#define ISCSI_OPCODE_LOGIN_RESPONSE     (0x23)
-#define ISCSI_OPCODE_TEXT_RESPONSE      (0x24)
-#define ISCSI_OPCODE_DATA_IN            (0x25)
-#define ISCSI_OPCODE_LOGOUT_RESPONSE    (0x26)
-#define ISCSI_OPCODE_R2T                (0x31)
-#define ISCSI_OPCODE_ASYNC_MSG          (0x32)
-#define ISCSI_OPCODE_REJECT             (0x3f)
+#define ISCSI_OPCODE_NOP_IN            (0x20)
+#define ISCSI_OPCODE_SCSI_RESPONSE     (0x21)
+#define ISCSI_OPCODE_TMF_RESPONSE      (0x22)
+#define ISCSI_OPCODE_LOGIN_RESPONSE    (0x23)
+#define ISCSI_OPCODE_TEXT_RESPONSE     (0x24)
+#define ISCSI_OPCODE_DATA_IN           (0x25)
+#define ISCSI_OPCODE_LOGOUT_RESPONSE   (0x26)
+#define ISCSI_OPCODE_R2T               (0x31)
+#define ISCSI_OPCODE_ASYNC_MSG         (0x32)
+#define ISCSI_OPCODE_REJECT            (0x3f)
 
 /* iSCSI stages */
-#define ISCSI_STAGE_SECURITY_NEGOTIATION            (0)
-#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION   (1)
-#define ISCSI_STAGE_FULL_FEATURE_PHASE              (3)
+#define ISCSI_STAGE_SECURITY_NEGOTIATION               (0)
+#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION      (1)
+#define ISCSI_STAGE_FULL_FEATURE_PHASE                 (3)
 
 /* iSCSI CQE errors */
-#define CQE_ERROR_BITMAP_DATA_DIGEST          (0x08)
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN  (0x10)
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED       (0x20)
+#define CQE_ERROR_BITMAP_DATA_DIGEST           (0x08)
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN   (0x10)
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED                (0x20)
+
+/* Union of data bd_opaque/ tq_tid */
+union bd_opaque_tq_union {
+       __le16 bd_opaque;
+       __le16 tq_tid;
+};
 
+/* ISCSI SGL entry */
 struct cqe_error_bitmap {
        u8 cqe_error_status_bits;
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK         0x7
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT        0
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK      0x1
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT     3
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK  0x1
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK   0x1
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT  5
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK        0x1
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT       6
-#define CQE_ERROR_BITMAP_RESERVED2_MASK            0x1
-#define CQE_ERROR_BITMAP_RESERVED2_SHIFT           7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK             0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT            0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK          0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT         3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK      0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT     4
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK       0x1
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT      5
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK            0x1
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT           6
+#define CQE_ERROR_BITMAP_RESERVED2_MASK                        0x1
+#define CQE_ERROR_BITMAP_RESERVED2_SHIFT               7
 };
 
 union cqe_error_status {
@@ -126,86 +134,133 @@ union cqe_error_status {
        struct cqe_error_bitmap error_bits;
 };
 
+/* iSCSI Login Response PDU header */
 struct data_hdr {
        __le32 data[12];
 };
 
-struct iscsi_async_msg_hdr {
-       __le16 reserved0;
-       u8 flags_attr;
-#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK           0x7F
-#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT          0
-#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK         0x1
-#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT        7
-       u8 opcode;
-       __le32 hdr_second_dword;
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
-       struct regpair lun;
-       __le32 all_ones;
-       __le32 reserved1;
-       __le32 stat_sn;
-       __le32 exp_cmd_sn;
-       __le32 max_cmd_sn;
-       __le16 param1_rsrv;
-       u8 async_vcode;
-       u8 async_event;
-       __le16 param3_rsrv;
-       __le16 param2_rsrv;
-       __le32 reserved7;
+struct lun_mapper_addr_reserved {
+       struct regpair lun_mapper_addr;
+       u8 reserved0[8];
+};
+
+/* rdif conetxt for dif on immediate */
+struct dif_on_immediate_params {
+       __le32 initial_ref_tag;
+       __le16 application_tag;
+       __le16 application_tag_mask;
+       __le16 flags1;
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK            0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT           0
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK          0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT         1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK          0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT         2
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK             0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT            3
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK           0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT          4
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK           0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT          5
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK             0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT            6
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK         0x1
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT                7
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK            0x3
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT           8
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK              0xF
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT             10
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT        14
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT        15
+       u8 flags0;
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK                  0x1
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT                 0
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK            0x1
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT           1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK  0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK           0x1
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT          3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK           0x3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT          4
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK                  0x1
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT                 6
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK                0x1
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT       7
+       u8 reserved_zero[5];
+};
+
+/* iSCSI dif on immediate mode attributes union */
+union dif_configuration_params {
+       struct lun_mapper_addr_reserved lun_mapper_address;
+       struct dif_on_immediate_params def_dif_conf;
+};
+
+/* Union of data/r2t sequence number */
+union iscsi_seq_num {
+       __le16 data_sn;
+       __le16 r2t_sn;
 };
 
-struct iscsi_cmd_hdr {
-       __le16 reserved1;
-       u8 flags_attr;
-#define ISCSI_CMD_HDR_ATTR_MASK                0x7
-#define ISCSI_CMD_HDR_ATTR_SHIFT       0
-#define ISCSI_CMD_HDR_RSRV_MASK                0x3
-#define ISCSI_CMD_HDR_RSRV_SHIFT       3
-#define ISCSI_CMD_HDR_WRITE_MASK       0x1
-#define ISCSI_CMD_HDR_WRITE_SHIFT      5
-#define ISCSI_CMD_HDR_READ_MASK                0x1
-#define ISCSI_CMD_HDR_READ_SHIFT       6
-#define ISCSI_CMD_HDR_FINAL_MASK       0x1
-#define ISCSI_CMD_HDR_FINAL_SHIFT      7
-       u8 hdr_first_byte;
-#define ISCSI_CMD_HDR_OPCODE_MASK      0x3F
-#define ISCSI_CMD_HDR_OPCODE_SHIFT     0
-#define ISCSI_CMD_HDR_IMM_MASK         0x1
-#define ISCSI_CMD_HDR_IMM_SHIFT                6
-#define ISCSI_CMD_HDR_RSRV1_MASK       0x1
-#define ISCSI_CMD_HDR_RSRV1_SHIFT      7
-       __le32 hdr_second_dword;
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
-       struct regpair lun;
-       __le32 itt;
-       __le32 expected_transfer_length;
-       __le32 cmd_sn;
-       __le32 exp_stat_sn;
-       __le32 cdb[4];
+/* iSCSI DIF flags */
+struct iscsi_dif_flags {
+       u8 flags;
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK    0xF
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT   0
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK               0x1
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT              4
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK            0x7
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT           5
 };
 
+/* The iscsi storm task context of Ystorm */
+struct ystorm_iscsi_task_state {
+       struct scsi_cached_sges data_desc;
+       struct scsi_sgl_params sgl_params;
+       __le32 exp_r2t_sn;
+       __le32 buffer_offset;
+       union iscsi_seq_num seq_num;
+       struct iscsi_dif_flags dif_flags;
+       u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK                0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT       0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK           0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT          1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK    0x1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT   2
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK         0x1F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT                3
+};
+
+/* The iscsi storm task context of Ystorm */
+struct ystorm_iscsi_task_rxmit_opt {
+       __le32 fast_rxmit_sge_offset;
+       __le32 scan_start_buffer_offset;
+       __le32 fast_rxmit_buffer_offset;
+       u8 scan_start_sgl_index;
+       u8 fast_rxmit_sgl_index;
+       __le16 reserved;
+};
+
+/* iSCSI Common PDU header */
 struct iscsi_common_hdr {
        u8 hdr_status;
        u8 hdr_response;
        u8 hdr_flags;
        u8 hdr_first_byte;
-#define ISCSI_COMMON_HDR_OPCODE_MASK         0x3F
-#define ISCSI_COMMON_HDR_OPCODE_SHIFT        0
-#define ISCSI_COMMON_HDR_IMM_MASK            0x1
-#define ISCSI_COMMON_HDR_IMM_SHIFT           6
-#define ISCSI_COMMON_HDR_RSRV_MASK           0x1
-#define ISCSI_COMMON_HDR_RSRV_SHIFT          7
+#define ISCSI_COMMON_HDR_OPCODE_MASK           0x3F
+#define ISCSI_COMMON_HDR_OPCODE_SHIFT          0
+#define ISCSI_COMMON_HDR_IMM_MASK              0x1
+#define ISCSI_COMMON_HDR_IMM_SHIFT             6
+#define ISCSI_COMMON_HDR_RSRV_MASK             0x1
+#define ISCSI_COMMON_HDR_RSRV_SHIFT            7
        __le32 hdr_second_dword;
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK     0xFFFFFF
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT    0
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK    0xFF
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT   24
        struct regpair lun_reserved;
        __le32 itt;
        __le32 ttt;
@@ -215,86 +270,60 @@ struct iscsi_common_hdr {
        __le32 data[3];
 };
 
-struct iscsi_conn_offload_params {
-       struct regpair sq_pbl_addr;
-       struct regpair r2tq_pbl_addr;
-       struct regpair xhq_pbl_addr;
-       struct regpair uhq_pbl_addr;
-       __le32 initial_ack;
-       __le16 physical_q0;
-       __le16 physical_q1;
-       u8 flags;
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK  0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK     0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT    1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT        2
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK       0x1F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT      3
-       u8 pbl_page_size_log;
-       u8 pbe_page_size_log;
-       u8 default_cq;
-       __le32 stat_sn;
-};
-
-struct iscsi_slow_path_hdr {
-       u8 op_code;
-       u8 flags;
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK   0xF
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT  0
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK  0x7
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK   0x1
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT  7
-};
-
-struct iscsi_conn_update_ramrod_params {
-       struct iscsi_slow_path_hdr hdr;
-       __le16 conn_id;
-       __le32 fw_cid;
-       u8 flags;
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK           0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT          0
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK           0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT          1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK     0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT    2
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK  0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK  0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK  0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK       0x3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT      6
-       u8 reserved0[3];
-       __le32 max_seq_size;
-       __le32 max_send_pdu_length;
-       __le32 max_recv_pdu_length;
-       __le32 first_seq_length;
+/* iSCSI Command PDU header */
+struct iscsi_cmd_hdr {
+       __le16 reserved1;
+       u8 flags_attr;
+#define ISCSI_CMD_HDR_ATTR_MASK                        0x7
+#define ISCSI_CMD_HDR_ATTR_SHIFT               0
+#define ISCSI_CMD_HDR_RSRV_MASK                        0x3
+#define ISCSI_CMD_HDR_RSRV_SHIFT               3
+#define ISCSI_CMD_HDR_WRITE_MASK               0x1
+#define ISCSI_CMD_HDR_WRITE_SHIFT              5
+#define ISCSI_CMD_HDR_READ_MASK                        0x1
+#define ISCSI_CMD_HDR_READ_SHIFT               6
+#define ISCSI_CMD_HDR_FINAL_MASK               0x1
+#define ISCSI_CMD_HDR_FINAL_SHIFT              7
+       u8 hdr_first_byte;
+#define ISCSI_CMD_HDR_OPCODE_MASK              0x3F
+#define ISCSI_CMD_HDR_OPCODE_SHIFT             0
+#define ISCSI_CMD_HDR_IMM_MASK                 0x1
+#define ISCSI_CMD_HDR_IMM_SHIFT                        6
+#define ISCSI_CMD_HDR_RSRV1_MASK               0x1
+#define ISCSI_CMD_HDR_RSRV1_SHIFT              7
+       __le32 hdr_second_dword;
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK                0xFFFFFF
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT       0
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK       0xFF
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT      24
+       struct regpair lun;
+       __le32 itt;
+       __le32 expected_transfer_length;
+       __le32 cmd_sn;
        __le32 exp_stat_sn;
+       __le32 cdb[4];
 };
 
+/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */
 struct iscsi_ext_cdb_cmd_hdr {
        __le16 reserved1;
        u8 flags_attr;
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK          0x7
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT         0
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK          0x3
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT         3
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK         0x1
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT        5
-#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK          0x1
-#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT         6
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK         0x1
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT        7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK                0x7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT       0
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK                0x3
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT       3
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK       0x1
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT      5
+#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK                0x1
+#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT       6
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK       0x1
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT      7
        u8 opcode;
        __le32 hdr_second_dword;
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK  0xFFFFFF
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK      0xFF
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT     24
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK                0xFFFFFF
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT       0
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK            0xFF
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT           24
        struct regpair lun;
        __le32 itt;
        __le32 expected_transfer_length;
@@ -303,26 +332,27 @@ struct iscsi_ext_cdb_cmd_hdr {
        struct scsi_sge cdb_sge;
 };
 
+/* iSCSI login request PDU header */
 struct iscsi_login_req_hdr {
        u8 version_min;
        u8 version_max;
        u8 flags_attr;
-#define ISCSI_LOGIN_REQ_HDR_NSG_MASK            0x3
-#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT           0
-#define ISCSI_LOGIN_REQ_HDR_CSG_MASK            0x3
-#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT           2
-#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK           0x3
-#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT          4
-#define ISCSI_LOGIN_REQ_HDR_C_MASK              0x1
-#define ISCSI_LOGIN_REQ_HDR_C_SHIFT             6
-#define ISCSI_LOGIN_REQ_HDR_T_MASK              0x1
-#define ISCSI_LOGIN_REQ_HDR_T_SHIFT             7
+#define ISCSI_LOGIN_REQ_HDR_NSG_MASK   0x3
+#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT  0
+#define ISCSI_LOGIN_REQ_HDR_CSG_MASK   0x3
+#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT  2
+#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK  0x3
+#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_C_MASK     0x1
+#define ISCSI_LOGIN_REQ_HDR_C_SHIFT    6
+#define ISCSI_LOGIN_REQ_HDR_T_MASK     0x1
+#define ISCSI_LOGIN_REQ_HDR_T_SHIFT    7
        u8 opcode;
        __le32 hdr_second_dword;
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK  0xFFFFFF
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT        24
        __le32 isid_tabc;
        __le16 tsih;
        __le16 isid_d;
@@ -334,6 +364,7 @@ struct iscsi_login_req_hdr {
        __le32 reserved2[4];
 };
 
+/* iSCSI logout request PDU header */
 struct iscsi_logout_req_hdr {
        __le16 reserved0;
        u8 reason_code;
@@ -348,13 +379,14 @@ struct iscsi_logout_req_hdr {
        __le32 reserved4[4];
 };
 
+/* iSCSI Data-out PDU header */
 struct iscsi_data_out_hdr {
        __le16 reserved1;
        u8 flags_attr;
-#define ISCSI_DATA_OUT_HDR_RSRV_MASK   0x7F
-#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT  0
-#define ISCSI_DATA_OUT_HDR_FINAL_MASK  0x1
-#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
+#define ISCSI_DATA_OUT_HDR_RSRV_MASK   0x7F
+#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT  0
+#define ISCSI_DATA_OUT_HDR_FINAL_MASK  0x1
+#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
        u8 opcode;
        __le32 reserved2;
        struct regpair lun;
@@ -368,22 +400,23 @@ struct iscsi_data_out_hdr {
        __le32 reserved5;
 };
 
+/* iSCSI Data-in PDU header */
 struct iscsi_data_in_hdr {
        u8 status_rsvd;
        u8 reserved1;
        u8 flags;
-#define ISCSI_DATA_IN_HDR_STATUS_MASK     0x1
-#define ISCSI_DATA_IN_HDR_STATUS_SHIFT    0
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK  0x1
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK   0x1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT  2
-#define ISCSI_DATA_IN_HDR_RSRV_MASK       0x7
-#define ISCSI_DATA_IN_HDR_RSRV_SHIFT      3
-#define ISCSI_DATA_IN_HDR_ACK_MASK        0x1
-#define ISCSI_DATA_IN_HDR_ACK_SHIFT       6
-#define ISCSI_DATA_IN_HDR_FINAL_MASK      0x1
-#define ISCSI_DATA_IN_HDR_FINAL_SHIFT     7
+#define ISCSI_DATA_IN_HDR_STATUS_MASK          0x1
+#define ISCSI_DATA_IN_HDR_STATUS_SHIFT         0
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK       0x1
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT      1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK                0x1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT       2
+#define ISCSI_DATA_IN_HDR_RSRV_MASK            0x7
+#define ISCSI_DATA_IN_HDR_RSRV_SHIFT           3
+#define ISCSI_DATA_IN_HDR_ACK_MASK             0x1
+#define ISCSI_DATA_IN_HDR_ACK_SHIFT            6
+#define ISCSI_DATA_IN_HDR_FINAL_MASK           0x1
+#define ISCSI_DATA_IN_HDR_FINAL_SHIFT          7
        u8 opcode;
        __le32 reserved2;
        struct regpair lun;
@@ -397,6 +430,7 @@ struct iscsi_data_in_hdr {
        __le32 residual_count;
 };
 
+/* iSCSI R2T PDU header */
 struct iscsi_r2t_hdr {
        u8 reserved0[3];
        u8 opcode;
@@ -412,13 +446,14 @@ struct iscsi_r2t_hdr {
        __le32 desired_data_trns_len;
 };
 
+/* iSCSI NOP-out PDU header */
 struct iscsi_nop_out_hdr {
        __le16 reserved1;
        u8 flags_attr;
-#define ISCSI_NOP_OUT_HDR_RSRV_MASK    0x7F
-#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT   0
-#define ISCSI_NOP_OUT_HDR_CONST1_MASK  0x1
-#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
+#define ISCSI_NOP_OUT_HDR_RSRV_MASK    0x7F
+#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT   0
+#define ISCSI_NOP_OUT_HDR_CONST1_MASK  0x1
+#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
        u8 opcode;
        __le32 reserved2;
        struct regpair lun;
@@ -432,19 +467,20 @@ struct iscsi_nop_out_hdr {
        __le32 reserved6;
 };
 
+/* iSCSI NOP-in PDU header */
 struct iscsi_nop_in_hdr {
        __le16 reserved0;
        u8 flags_attr;
-#define ISCSI_NOP_IN_HDR_RSRV_MASK           0x7F
-#define ISCSI_NOP_IN_HDR_RSRV_SHIFT          0
-#define ISCSI_NOP_IN_HDR_CONST1_MASK         0x1
-#define ISCSI_NOP_IN_HDR_CONST1_SHIFT        7
+#define ISCSI_NOP_IN_HDR_RSRV_MASK     0x7F
+#define ISCSI_NOP_IN_HDR_RSRV_SHIFT    0
+#define ISCSI_NOP_IN_HDR_CONST1_MASK   0x1
+#define ISCSI_NOP_IN_HDR_CONST1_SHIFT  7
        u8 opcode;
        __le32 hdr_second_dword;
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK     0xFFFFFF
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT    0
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK    0xFF
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT   24
        struct regpair lun;
        __le32 itt;
        __le32 ttt;
@@ -456,26 +492,27 @@ struct iscsi_nop_in_hdr {
        __le32 reserved7;
 };
 
+/* iSCSI Login Response PDU header */
 struct iscsi_login_response_hdr {
        u8 version_active;
        u8 version_max;
        u8 flags_attr;
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK            0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT           0
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK            0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT           2
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK           0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT          4
-#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK              0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT             6
-#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK              0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT             7
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK      0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT     0
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK      0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT     2
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK     0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT    4
+#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK                0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT       6
+#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK                0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT       7
        u8 opcode;
        __le32 hdr_second_dword;
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK     0xFFFFFF
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT    0
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK    0xFF
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT   24
        __le32 isid_tabc;
        __le16 tsih;
        __le16 isid_d;
@@ -490,16 +527,17 @@ struct iscsi_login_response_hdr {
        __le32 reserved4[2];
 };
 
+/* iSCSI Logout Response PDU header */
 struct iscsi_logout_response_hdr {
        u8 reserved1;
        u8 response;
        u8 flags;
        u8 opcode;
        __le32 hdr_second_dword;
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK    0xFFFFFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT   0
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK   0xFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT  24
        __le32 reserved2[2];
        __le32 itt;
        __le32 reserved3;
@@ -512,21 +550,22 @@ struct iscsi_logout_response_hdr {
        __le32 reserved5[1];
 };
 
+/* iSCSI Text Request PDU header */
 struct iscsi_text_request_hdr {
        __le16 reserved0;
        u8 flags_attr;
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK           0x3F
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT          0
-#define ISCSI_TEXT_REQUEST_HDR_C_MASK              0x1
-#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT             6
-#define ISCSI_TEXT_REQUEST_HDR_F_MASK              0x1
-#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT             7
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK       0x3F
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT      0
+#define ISCSI_TEXT_REQUEST_HDR_C_MASK          0x1
+#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT         6
+#define ISCSI_TEXT_REQUEST_HDR_F_MASK          0x1
+#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT         7
        u8 opcode;
        __le32 hdr_second_dword;
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK       0xFFFFFF
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT      0
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK      0xFF
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT     24
        struct regpair lun;
        __le32 itt;
        __le32 ttt;
@@ -535,21 +574,22 @@ struct iscsi_text_request_hdr {
        __le32 reserved4[4];
 };
 
+/* iSCSI Text Response PDU header */
 struct iscsi_text_response_hdr {
        __le16 reserved1;
        u8 flags;
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK           0x3F
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT          0
-#define ISCSI_TEXT_RESPONSE_HDR_C_MASK              0x1
-#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT             6
-#define ISCSI_TEXT_RESPONSE_HDR_F_MASK              0x1
-#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT             7
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK      0x3F
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT     0
+#define ISCSI_TEXT_RESPONSE_HDR_C_MASK         0x1
+#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT                6
+#define ISCSI_TEXT_RESPONSE_HDR_F_MASK         0x1
+#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT                7
        u8 opcode;
        __le32 hdr_second_dword;
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK      0xFFFFFF
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT     0
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK     0xFF
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT    24
        struct regpair lun;
        __le32 itt;
        __le32 ttt;
@@ -559,15 +599,16 @@ struct iscsi_text_response_hdr {
        __le32 reserved4[3];
 };
 
+/* iSCSI TMF Request PDU header */
 struct iscsi_tmf_request_hdr {
        __le16 reserved0;
        u8 function;
        u8 opcode;
        __le32 hdr_second_dword;
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK                0xFFFFFF
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT       0
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK       0xFF
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT      24
        struct regpair lun;
        __le32 itt;
        __le32 rtt;
@@ -584,10 +625,10 @@ struct iscsi_tmf_response_hdr {
        u8 hdr_flags;
        u8 opcode;
        __le32 hdr_second_dword;
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK       0xFFFFFF
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT      0
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK      0xFF
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT     24
        struct regpair reserved0;
        __le32 itt;
        __le32 reserved1;
@@ -597,16 +638,17 @@ struct iscsi_tmf_response_hdr {
        __le32 reserved4[3];
 };
 
+/* iSCSI Response PDU header */
 struct iscsi_response_hdr {
        u8 hdr_status;
        u8 hdr_response;
        u8 hdr_flags;
        u8 opcode;
        __le32 hdr_second_dword;
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
        struct regpair lun;
        __le32 itt;
        __le32 snack_tag;
@@ -618,16 +660,17 @@ struct iscsi_response_hdr {
        __le32 residual_count;
 };
 
+/* iSCSI Reject PDU header */
 struct iscsi_reject_hdr {
        u8 reserved4;
        u8 hdr_reason;
        u8 hdr_flags;
        u8 opcode;
        __le32 hdr_second_dword;
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK     0xFFFFFF
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT    0
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK    0xFF
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT   24
        struct regpair reserved0;
        __le32 all_ones;
        __le32 reserved2;
@@ -638,6 +681,35 @@ struct iscsi_reject_hdr {
        __le32 reserved3[2];
 };
 
+/* iSCSI Asynchronous Message PDU header */
+struct iscsi_async_msg_hdr {
+       __le16 reserved0;
+       u8 flags_attr;
+#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK          0x7F
+#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT         0
+#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK                0x1
+#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT       7
+       u8 opcode;
+       __le32 hdr_second_dword;
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK  0xFFFFFF
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT        24
+       struct regpair lun;
+       __le32 all_ones;
+       __le32 reserved1;
+       __le32 stat_sn;
+       __le32 exp_cmd_sn;
+       __le32 max_cmd_sn;
+       __le16 param1_rsrv;
+       u8 async_vcode;
+       u8 async_event;
+       __le16 param3_rsrv;
+       __le16 param2_rsrv;
+       __le32 reserved7;
+};
+
+/* PDU header part of Ystorm task context */
 union iscsi_task_hdr {
        struct iscsi_common_hdr common;
        struct data_hdr data;
@@ -661,6 +733,348 @@ union iscsi_task_hdr {
        struct iscsi_async_msg_hdr async_msg;
 };
 
+/* The iscsi storm task context of Ystorm */
+struct ystorm_iscsi_task_st_ctx {
+       struct ystorm_iscsi_task_state state;
+       struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
+       union iscsi_task_hdr pdu_hdr;
+};
+
+struct e4_ystorm_iscsi_task_ag_ctx {
+       u8 reserved;
+       u8 byte1;
+       __le16 word0;
+       u8 flags0;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK       0xF
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT      0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK          0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT         4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK          0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT         5
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK         0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT                6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK          0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT         7
+       u8 flags1;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK           0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT          0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK           0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT          2
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK    0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT   4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK         0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT                6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK         0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT                7
+       u8 flags2;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK          0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT         0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK       0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT      1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK       0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT      2
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK       0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT      3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK       0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT      4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK       0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT      5
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK       0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT      6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK       0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT      7
+       u8 byte2;
+       __le32 TTT;
+       u8 byte3;
+       u8 byte4;
+       __le16 word1;
+};
+
+struct e4_mstorm_iscsi_task_ag_ctx {
+       u8 cdu_validation;
+       u8 byte1;
+       __le16 task_cid;
+       u8 flags0;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK       0xF
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT      0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK          0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT         4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                  0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT                 5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK                 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT                        6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK     0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT    7
+       u8 flags1;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK       0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT      0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK                   0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT                  2
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK                   0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT                  4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK    0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT   6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK                 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT                        7
+       u8 flags2;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK         0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT                0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK       0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT      1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK       0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT      2
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK       0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT      3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK       0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT      4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK       0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT      5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK       0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT      6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK       0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT      7
+       u8 byte2;
+       __le32 reg0;
+       u8 byte3;
+       u8 byte4;
+       __le16 word1;
+};
+
+struct e4_ustorm_iscsi_task_ag_ctx {
+       u8 reserved;
+       u8 state;
+       __le16 icid;
+       u8 flags0;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK       0xF
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT      0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK          0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT         4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                  0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT                 5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK         0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT                6
+       u8 flags1;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK     0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT    0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK      0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT     2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK           0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT          4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK  0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+       u8 flags2;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK      0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT     0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK    0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT   1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK           0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT          2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK                 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT                        3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK       0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT      4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT        5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK               0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT              6
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK   0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT  7
+       u8 flags3;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK               0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT              0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK               0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT              1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK               0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT              2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK               0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT              3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK                0xF
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT       4
+       __le32 dif_err_intervals;
+       __le32 dif_error_1st_interval;
+       __le32 rcv_cont_len;
+       __le32 exp_cont_len;
+       __le32 total_data_acked;
+       __le32 exp_data_acked;
+       u8 next_tid_valid;
+       u8 byte3;
+       __le16 word1;
+       __le16 next_tid;
+       __le16 word3;
+       __le32 hdr_residual_count;
+       __le32 exp_r2t_sn;
+};
+
+/* The iscsi storm task context of Mstorm */
+struct mstorm_iscsi_task_st_ctx {
+       struct scsi_cached_sges data_desc;
+       struct scsi_sgl_params sgl_params;
+       __le32 rem_task_size;
+       __le32 data_buffer_offset;
+       u8 task_type;
+       struct iscsi_dif_flags dif_flags;
+       __le16 dif_task_icid;
+       struct regpair sense_db;
+       __le32 expected_itt;
+       __le32 reserved1;
+};
+
+struct iscsi_reg1 {
+       __le32 reg1_map;
+#define ISCSI_REG1_NUM_SGES_MASK       0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT      0
+#define ISCSI_REG1_RESERVED1_MASK      0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT     4
+};
+
+struct tqe_opaque {
+       __le16 opaque[2];
+};
+
+/* The iscsi storm task context of Ustorm */
+struct ustorm_iscsi_task_st_ctx {
+       __le32 rem_rcv_len;
+       __le32 exp_data_transfer_len;
+       __le32 exp_data_sn;
+       struct regpair lun;
+       struct iscsi_reg1 reg1;
+       u8 flags2;
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK                0x1
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT       0
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK                0x7F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT       1
+       struct iscsi_dif_flags dif_flags;
+       __le16 reserved3;
+       struct tqe_opaque tqe_opaque_list;
+       __le32 reserved5;
+       __le32 reserved6;
+       __le32 reserved7;
+       u8 task_type;
+       u8 error_flags;
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK                0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT       0
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK     0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT    1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK          0x1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT         2
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK                        0x1F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT               3
+       u8 flags;
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK                        0x3
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT               0
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK               0x1
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT              2
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK           0x1
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT          3
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK    0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT   4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK          0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT         5
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK            0x1
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT           6
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK                        0x1
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT               7
+       u8 cq_rss_number;
+};
+
+/* iscsi task context */
+struct e4_iscsi_task_context {
+       struct ystorm_iscsi_task_st_ctx ystorm_st_context;
+       struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+       struct regpair ystorm_ag_padding[2];
+       struct tdif_task_context tdif_context;
+       struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+       struct regpair mstorm_ag_padding[2];
+       struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+       struct mstorm_iscsi_task_st_ctx mstorm_st_context;
+       struct ustorm_iscsi_task_st_ctx ustorm_st_context;
+       struct rdif_task_context rdif_context;
+};
+
+/* iSCSI connection offload params passed by driver to FW in ISCSI offload
+ * ramrod.
+ */
+struct iscsi_conn_offload_params {
+       struct regpair sq_pbl_addr;
+       struct regpair r2tq_pbl_addr;
+       struct regpair xhq_pbl_addr;
+       struct regpair uhq_pbl_addr;
+       __le32 initial_ack;
+       __le16 physical_q0;
+       __le16 physical_q1;
+       u8 flags;
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK  0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK     0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT    1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT        2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK       0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT      3
+       u8 pbl_page_size_log;
+       u8 pbe_page_size_log;
+       u8 default_cq;
+       __le32 stat_sn;
+};
+
+/* iSCSI connection statistics */
+struct iscsi_conn_stats_params {
+       struct regpair iscsi_tcp_tx_packets_cnt;
+       struct regpair iscsi_tcp_tx_bytes_cnt;
+       struct regpair iscsi_tcp_tx_rxmit_cnt;
+       struct regpair iscsi_tcp_rx_packets_cnt;
+       struct regpair iscsi_tcp_rx_bytes_cnt;
+       struct regpair iscsi_tcp_rx_dup_ack_cnt;
+       __le32 iscsi_tcp_rx_chksum_err_cnt;
+       __le32 reserved;
+};
+
+/* spe message header */
+struct iscsi_slow_path_hdr {
+       u8 op_code;
+       u8 flags;
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK     0xF
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT    0
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK    0x7
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT   4
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK     0x1
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT    7
+};
+
+/* iSCSI connection update params passed by driver to FW in ISCSI update
+ * ramrod.
+ */
+struct iscsi_conn_update_ramrod_params {
+       struct iscsi_slow_path_hdr hdr;
+       __le16 conn_id;
+       __le32 fw_cid;
+       u8 flags;
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK             0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT            0
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK             0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT            1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK       0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT      2
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK    0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT   3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK    0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT   4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK    0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT   5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK     0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT    6
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK     0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT    7
+       u8 reserved0[3];
+       __le32 max_seq_size;
+       __le32 max_send_pdu_length;
+       __le32 max_recv_pdu_length;
+       __le32 first_seq_length;
+       __le32 exp_stat_sn;
+       union dif_configuration_params dif_on_imme_params;
+};
+
+/* iSCSI CQ element */
 struct iscsi_cqe_common {
        __le16 conn_id;
        u8 cqe_type;
@@ -669,6 +1083,7 @@ struct iscsi_cqe_common {
        union iscsi_task_hdr iscsi_hdr;
 };
 
+/* iSCSI CQ element */
 struct iscsi_cqe_solicited {
        __le16 conn_id;
        u8 cqe_type;
@@ -678,10 +1093,11 @@ struct iscsi_cqe_solicited {
        u8 fw_dbg_field;
        u8 caused_conn_err;
        u8 reserved0[3];
-       __le32 reserved1[1];
+       __le32 data_truncated_bytes;
        union iscsi_task_hdr iscsi_hdr;
 };
 
+/* iSCSI CQ element */
 struct iscsi_cqe_unsolicited {
        __le16 conn_id;
        u8 cqe_type;
@@ -689,16 +1105,19 @@ struct iscsi_cqe_unsolicited {
        __le16 reserved0;
        u8 reserved1;
        u8 unsol_cqe_type;
-       struct regpair rqe_opaque;
+       __le16 rqe_opaque;
+       __le16 reserved2[3];
        union iscsi_task_hdr iscsi_hdr;
 };
 
+/* iSCSI CQ element */
 union iscsi_cqe {
        struct iscsi_cqe_common cqe_common;
        struct iscsi_cqe_solicited cqe_solicited;
        struct iscsi_cqe_unsolicited cqe_unsolicited;
 };
 
+/* iSCSI CQE type */
 enum iscsi_cqes_type {
        ISCSI_CQE_TYPE_SOLICITED = 1,
        ISCSI_CQE_TYPE_UNSOLICITED,
@@ -708,6 +1127,7 @@ enum iscsi_cqes_type {
        MAX_ISCSI_CQES_TYPE
 };
 
+/* iSCSI CQE unsolicited type */
 enum iscsi_cqe_unsolicited_type {
        ISCSI_CQE_UNSOLICITED_NONE,
        ISCSI_CQE_UNSOLICITED_SINGLE,
@@ -717,37 +1137,28 @@ enum iscsi_cqe_unsolicited_type {
        MAX_ISCSI_CQE_UNSOLICITED_TYPE
 };
 
-
+/* iscsi debug modes */
 struct iscsi_debug_modes {
        u8 flags;
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK         0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT        0
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK            0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT           1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK              0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT             2
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK          0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT         3
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK  0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK              0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT             5
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK     0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT    6
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK             0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT            7
-};
-
-struct iscsi_dif_flags {
-       u8 flags;
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK  0xF
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK             0x1
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT            4
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK          0x7
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT         5
-};
-
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK                 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT                        0
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK                    0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT                   1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK                      0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT                     2
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK                  0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT                 3
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK          0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT         4
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK                      0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT                     5
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK      0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT     6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK                    0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT                   7
+};
+
+/* iSCSI kernel completion queue IDs */
 enum iscsi_eqe_opcode {
        ISCSI_EVENT_TYPE_INIT_FUNC = 0,
        ISCSI_EVENT_TYPE_DESTROY_FUNC,
@@ -756,9 +1167,9 @@ enum iscsi_eqe_opcode {
        ISCSI_EVENT_TYPE_CLEAR_SQ,
        ISCSI_EVENT_TYPE_TERMINATE_CONN,
        ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
+       ISCSI_EVENT_TYPE_COLLECT_STATS_CONN,
        ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
        ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
-       RESERVED9,
        ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
        ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
        ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
@@ -772,6 +1183,7 @@ enum iscsi_eqe_opcode {
        MAX_ISCSI_EQE_OPCODE
 };
 
+/* iSCSI EQE and CQE completion status */
 enum iscsi_error_types {
        ISCSI_STATUS_NONE = 0,
        ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
@@ -823,7 +1235,7 @@ enum iscsi_error_types {
        MAX_ISCSI_ERROR_TYPES
 };
 
-
+/* iSCSI Ramrod Command IDs */
 enum iscsi_ramrod_cmd_id {
        ISCSI_RAMROD_CMD_ID_UNUSED = 0,
        ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
@@ -833,22 +1245,11 @@ enum iscsi_ramrod_cmd_id {
        ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
        ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
        ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
+       ISCSI_RAMROD_CMD_ID_CONN_STATS = 8,
        MAX_ISCSI_RAMROD_CMD_ID
 };
 
-struct iscsi_reg1 {
-       __le32 reg1_map;
-#define ISCSI_REG1_NUM_SGES_MASK   0xF
-#define ISCSI_REG1_NUM_SGES_SHIFT  0
-#define ISCSI_REG1_RESERVED1_MASK  0xFFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT 4
-};
-
-union iscsi_seq_num {
-       __le16 data_sn;
-       __le16 r2t_sn;
-};
-
+/* iSCSI connection MAC update request */
 struct iscsi_spe_conn_mac_update {
        struct iscsi_slow_path_hdr hdr;
        __le16 conn_id;
@@ -859,6 +1260,9 @@ struct iscsi_spe_conn_mac_update {
        u8 reserved0[2];
 };
 
+/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
 struct iscsi_spe_conn_offload {
        struct iscsi_slow_path_hdr hdr;
        __le16 conn_id;
@@ -867,6 +1271,9 @@ struct iscsi_spe_conn_offload {
        struct tcp_offload_params tcp;
 };
 
+/* iSCSI and TCP connection(Option 2) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
 struct iscsi_spe_conn_offload_option2 {
        struct iscsi_slow_path_hdr hdr;
        __le16 conn_id;
@@ -875,6 +1282,17 @@ struct iscsi_spe_conn_offload_option2 {
        struct tcp_offload_params_opt2 tcp;
 };
 
+/* iSCSI collect connection statistics request */
+struct iscsi_spe_conn_statistics {
+       struct iscsi_slow_path_hdr hdr;
+       __le16 conn_id;
+       __le32 fw_cid;
+       u8 reset_stats;
+       u8 reserved0[7];
+       struct regpair stats_cnts_addr;
+};
+
+/* iSCSI connection termination request */
 struct iscsi_spe_conn_termination {
        struct iscsi_slow_path_hdr hdr;
        __le16 conn_id;
@@ -885,12 +1303,14 @@ struct iscsi_spe_conn_termination {
        struct regpair query_params_addr;
 };
 
+/* iSCSI firmware function destroy parameters */
 struct iscsi_spe_func_dstry {
        struct iscsi_slow_path_hdr hdr;
        __le16 reserved0;
        __le32 reserved1;
 };
 
+/* iSCSI firmware function init parameters */
 struct iscsi_spe_func_init {
        struct iscsi_slow_path_hdr hdr;
        __le16 half_way_close_timeout;
@@ -898,283 +1318,19 @@ struct iscsi_spe_func_init {
        u8 num_r2tq_pages_in_ring;
        u8 num_uhq_pages_in_ring;
        u8 ll2_rx_queue_id;
-       u8 ooo_enable;
+       u8 flags;
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK   0x1
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT  0
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK     0x7F
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT    1
        struct iscsi_debug_modes debug_mode;
        __le16 reserved1;
        __le32 reserved2;
-       __le32 reserved3;
-       __le32 reserved4;
        struct scsi_init_func_params func_params;
        struct scsi_init_func_queues q_params;
 };
 
-struct ystorm_iscsi_task_state {
-       struct scsi_cached_sges data_desc;
-       struct scsi_sgl_params sgl_params;
-       __le32 exp_r2t_sn;
-       __le32 buffer_offset;
-       union iscsi_seq_num seq_num;
-       struct iscsi_dif_flags dif_flags;
-       u8 flags;
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK  0x1
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK     0x1
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT    1
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK   0x3F
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT  2
-};
-
-struct ystorm_iscsi_task_rxmit_opt {
-       __le32 fast_rxmit_sge_offset;
-       __le32 scan_start_buffer_offset;
-       __le32 fast_rxmit_buffer_offset;
-       u8 scan_start_sgl_index;
-       u8 fast_rxmit_sgl_index;
-       __le16 reserved;
-};
-
-struct ystorm_iscsi_task_st_ctx {
-       struct ystorm_iscsi_task_state state;
-       struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
-       union iscsi_task_hdr pdu_hdr;
-};
-
-struct ystorm_iscsi_task_ag_ctx {
-       u8 reserved;
-       u8 byte1;
-       __le16 word0;
-       u8 flags0;
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK     0xF
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT    0
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT       4
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT       5
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK       0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT      6
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT       7
-       u8 flags1;
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK         0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT        0
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK         0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT        2
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK  0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK       0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT      6
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK       0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT      7
-       u8 flags2;
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT       0
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT    1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT    2
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT    3
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT    4
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT    5
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT    6
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT    7
-       u8 byte2;
-       __le32 TTT;
-       u8 byte3;
-       u8 byte4;
-       __le16 word1;
-};
-
-struct mstorm_iscsi_task_ag_ctx {
-       u8 cdu_validation;
-       u8 byte1;
-       __le16 task_cid;
-       u8 flags0;
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK     0xF
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT    0
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT       4
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT               5
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK               0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT              6
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK   0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT  7
-       u8 flags1;
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK     0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT    0
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK                 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT                2
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK                 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT                4
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK  0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK               0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT              7
-       u8 flags2;
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK               0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT              0
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT            1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT            2
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT            3
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT            4
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT            5
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT            6
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT            7
-       u8 byte2;
-       __le32 reg0;
-       u8 byte3;
-       u8 byte4;
-       __le16 word1;
-};
-
-struct ustorm_iscsi_task_ag_ctx {
-       u8 reserved;
-       u8 state;
-       __le16 icid;
-       u8 flags0;
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK        0xF
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT       0
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK           0x1
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT          4
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                   0x1
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT                  5
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK          0x3
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT         6
-       u8 flags1;
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK              0x3
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT             0
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK               0x3
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT              2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK                    0x3
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT                   4
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK           0x3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT          6
-       u8 flags2;
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK       0x1
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT      0
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK     0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT    1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK            0x1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT           2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK                  0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT                 3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK        0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT       4
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK  0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT               6
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK    0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT   7
-       u8 flags3;
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT               0
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT               1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT               2
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT               3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK         0xF
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT        4
-       __le32 dif_err_intervals;
-       __le32 dif_error_1st_interval;
-       __le32 rcv_cont_len;
-       __le32 exp_cont_len;
-       __le32 total_data_acked;
-       __le32 exp_data_acked;
-       u8 next_tid_valid;
-       u8 byte3;
-       __le16 word1;
-       __le16 next_tid;
-       __le16 word3;
-       __le32 hdr_residual_count;
-       __le32 exp_r2t_sn;
-};
-
-struct mstorm_iscsi_task_st_ctx {
-       struct scsi_cached_sges data_desc;
-       struct scsi_sgl_params sgl_params;
-       __le32 rem_task_size;
-       __le32 data_buffer_offset;
-       u8 task_type;
-       struct iscsi_dif_flags dif_flags;
-       u8 reserved0[2];
-       struct regpair sense_db;
-       __le32 expected_itt;
-       __le32 reserved1;
-};
-
-struct ustorm_iscsi_task_st_ctx {
-       __le32 rem_rcv_len;
-       __le32 exp_data_transfer_len;
-       __le32 exp_data_sn;
-       struct regpair lun;
-       struct iscsi_reg1 reg1;
-       u8 flags2;
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK             0x1
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT            0
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK             0x7F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT            1
-       struct iscsi_dif_flags dif_flags;
-       __le16 reserved3;
-       __le32 reserved4;
-       __le32 reserved5;
-       __le32 reserved6;
-       __le32 reserved7;
-       u8 task_type;
-       u8 error_flags;
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK     0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT    0
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK  0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK       0x1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT      2
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK             0x1F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT            3
-       u8 flags;
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK             0x3
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT            0
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK            0x1
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT           2
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK        0x1
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT       3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK  0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK        0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT       5
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK         0x1
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT        6
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK             0x1
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT            7
-       u8 cq_rss_number;
-};
-
-struct iscsi_task_context {
-       struct ystorm_iscsi_task_st_ctx ystorm_st_context;
-       struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
-       struct regpair ystorm_ag_padding[2];
-       struct tdif_task_context tdif_context;
-       struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
-       struct regpair mstorm_ag_padding[2];
-       struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
-       struct mstorm_iscsi_task_st_ctx mstorm_st_context;
-       struct ustorm_iscsi_task_st_ctx ustorm_st_context;
-       struct rdif_task_context rdif_context;
-};
-
+/* iSCSI task type */
 enum iscsi_task_type {
        ISCSI_TASK_TYPE_INITIATOR_WRITE,
        ISCSI_TASK_TYPE_INITIATOR_READ,
@@ -1186,53 +1342,57 @@ enum iscsi_task_type {
        ISCSI_TASK_TYPE_TARGET_READ,
        ISCSI_TASK_TYPE_TARGET_RESPONSE,
        ISCSI_TASK_TYPE_LOGIN_RESPONSE,
+       ISCSI_TASK_TYPE_TARGET_IMM_W_DIF,
        MAX_ISCSI_TASK_TYPE
 };
 
+/* iSCSI DesiredDataTransferLength/ttt union */
 union iscsi_ttt_txlen_union {
        __le32 desired_tx_len;
        __le32 ttt;
 };
 
+/* iSCSI uHQ element */
 struct iscsi_uhqe {
        __le32 reg1;
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK     0xFFFFF
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT    0
-#define ISCSI_UHQE_LOCAL_COMP_MASK          0x1
-#define ISCSI_UHQE_LOCAL_COMP_SHIFT         20
-#define ISCSI_UHQE_TOGGLE_BIT_MASK          0x1
-#define ISCSI_UHQE_TOGGLE_BIT_SHIFT         21
-#define ISCSI_UHQE_PURE_PAYLOAD_MASK        0x1
-#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT       22
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK  0x1
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
-#define ISCSI_UHQE_TASK_ID_HI_MASK          0xFF
-#define ISCSI_UHQE_TASK_ID_HI_SHIFT         24
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK                0xFFFFF
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT       0
+#define ISCSI_UHQE_LOCAL_COMP_MASK             0x1
+#define ISCSI_UHQE_LOCAL_COMP_SHIFT            20
+#define ISCSI_UHQE_TOGGLE_BIT_MASK             0x1
+#define ISCSI_UHQE_TOGGLE_BIT_SHIFT            21
+#define ISCSI_UHQE_PURE_PAYLOAD_MASK           0x1
+#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT          22
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK     0x1
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT    23
+#define ISCSI_UHQE_TASK_ID_HI_MASK             0xFF
+#define ISCSI_UHQE_TASK_ID_HI_SHIFT            24
        __le32 reg2;
-#define ISCSI_UHQE_BUFFER_OFFSET_MASK       0xFFFFFF
-#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT      0
-#define ISCSI_UHQE_TASK_ID_LO_MASK          0xFF
-#define ISCSI_UHQE_TASK_ID_LO_SHIFT         24
+#define ISCSI_UHQE_BUFFER_OFFSET_MASK  0xFFFFFF
+#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0
+#define ISCSI_UHQE_TASK_ID_LO_MASK     0xFF
+#define ISCSI_UHQE_TASK_ID_LO_SHIFT    24
 };
 
-
+/* iSCSI WQ element */
 struct iscsi_wqe {
        __le16 task_id;
        u8 flags;
-#define ISCSI_WQE_WQE_TYPE_MASK        0x7
-#define ISCSI_WQE_WQE_TYPE_SHIFT       0
-#define ISCSI_WQE_NUM_SGES_MASK  0xF
-#define ISCSI_WQE_NUM_SGES_SHIFT 3
-#define ISCSI_WQE_RESPONSE_MASK        0x1
-#define ISCSI_WQE_RESPONSE_SHIFT       7
+#define ISCSI_WQE_WQE_TYPE_MASK                0x7
+#define ISCSI_WQE_WQE_TYPE_SHIFT       0
+#define ISCSI_WQE_NUM_SGES_MASK                0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT       3
+#define ISCSI_WQE_RESPONSE_MASK                0x1
+#define ISCSI_WQE_RESPONSE_SHIFT       7
        struct iscsi_dif_flags prot_flags;
        __le32 contlen_cdbsize;
-#define ISCSI_WQE_CONT_LEN_MASK  0xFFFFFF
-#define ISCSI_WQE_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_CDB_SIZE_MASK  0xFF
-#define ISCSI_WQE_CDB_SIZE_SHIFT 24
+#define ISCSI_WQE_CONT_LEN_MASK                0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT       0
+#define ISCSI_WQE_CDB_SIZE_MASK                0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT       24
 };
 
+/* iSCSI wqe type */
 enum iscsi_wqe_type {
        ISCSI_WQE_TYPE_NORMAL,
        ISCSI_WQE_TYPE_TASK_CLEANUP,
@@ -1244,6 +1404,7 @@ enum iscsi_wqe_type {
        MAX_ISCSI_WQE_TYPE
 };
 
+/* iSCSI xHQ element */
 struct iscsi_xhqe {
        union iscsi_ttt_txlen_union ttt_or_txlen;
        __le32 exp_stat_sn;
@@ -1251,120 +1412,134 @@ struct iscsi_xhqe {
        u8 total_ahs_length;
        u8 opcode;
        u8 flags;
-#define ISCSI_XHQE_FINAL_MASK       0x1
-#define ISCSI_XHQE_FINAL_SHIFT      0
-#define ISCSI_XHQE_STATUS_BIT_MASK  0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
-#define ISCSI_XHQE_NUM_SGES_MASK    0xF
-#define ISCSI_XHQE_NUM_SGES_SHIFT   2
-#define ISCSI_XHQE_RESERVED0_MASK   0x3
-#define ISCSI_XHQE_RESERVED0_SHIFT  6
+#define ISCSI_XHQE_FINAL_MASK          0x1
+#define ISCSI_XHQE_FINAL_SHIFT         0
+#define ISCSI_XHQE_STATUS_BIT_MASK     0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT    1
+#define ISCSI_XHQE_NUM_SGES_MASK       0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT      2
+#define ISCSI_XHQE_RESERVED0_MASK      0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT     6
        union iscsi_seq_num seq_num;
        __le16 reserved1;
 };
 
+/* Per PF iSCSI receive path statistics - mStorm RAM structure */
 struct mstorm_iscsi_stats_drv {
        struct regpair iscsi_rx_dropped_pdus_task_not_valid;
+       struct regpair iscsi_rx_dup_ack_cnt;
 };
 
+/* Per PF iSCSI transmit path statistics - pStorm RAM structure */
 struct pstorm_iscsi_stats_drv {
        struct regpair iscsi_tx_bytes_cnt;
        struct regpair iscsi_tx_packet_cnt;
 };
 
+/* Per PF iSCSI receive path statistics - tStorm RAM structure */
 struct tstorm_iscsi_stats_drv {
        struct regpair iscsi_rx_bytes_cnt;
        struct regpair iscsi_rx_packet_cnt;
        struct regpair iscsi_rx_new_ooo_isle_events_cnt;
+       struct regpair iscsi_rx_tcp_payload_bytes_cnt;
+       struct regpair iscsi_rx_tcp_pkt_cnt;
+       struct regpair iscsi_rx_pure_ack_cnt;
        __le32 iscsi_cmdq_threshold_cnt;
        __le32 iscsi_rq_threshold_cnt;
        __le32 iscsi_immq_threshold_cnt;
 };
 
+/* Per PF iSCSI receive path statistics - uStorm RAM structure */
 struct ustorm_iscsi_stats_drv {
        struct regpair iscsi_rx_data_pdu_cnt;
        struct regpair iscsi_rx_r2t_pdu_cnt;
        struct regpair iscsi_rx_total_pdu_cnt;
 };
 
+/* Per PF iSCSI transmit path statistics - xStorm RAM structure */
 struct xstorm_iscsi_stats_drv {
        struct regpair iscsi_tx_go_to_slow_start_event_cnt;
        struct regpair iscsi_tx_fast_retransmit_event_cnt;
+       struct regpair iscsi_tx_pure_ack_cnt;
+       struct regpair iscsi_tx_delayed_ack_cnt;
 };
 
+/* Per PF iSCSI transmit path statistics - yStorm RAM structure */
 struct ystorm_iscsi_stats_drv {
        struct regpair iscsi_tx_data_pdu_cnt;
        struct regpair iscsi_tx_r2t_pdu_cnt;
        struct regpair iscsi_tx_total_pdu_cnt;
+       struct regpair iscsi_tx_tcp_payload_bytes_cnt;
+       struct regpair iscsi_tx_tcp_pkt_cnt;
 };
 
-struct tstorm_iscsi_task_ag_ctx {
+struct e4_tstorm_iscsi_task_ag_ctx {
        u8 byte0;
        u8 byte1;
        __le16 word0;
        u8 flags0;
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK  0xF
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT    4
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT    5
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT    6
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT    7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK       0xF
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT      0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK          0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT         4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK          0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT         5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK          0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT         6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK          0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT         7
        u8 flags1;
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT    0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT    1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT     2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT     4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT     6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK  0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK  0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK   0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT  2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK   0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT  4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK   0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT  6
        u8 flags2;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT     0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT     2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT     4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT     6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK   0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT  0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK   0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT  2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK   0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT  4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK   0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT  6
        u8 flags3;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT     0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT   2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT   3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT   4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT   5
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT   6
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT   7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK   0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT  0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT        2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT        3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT        4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT        5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT        6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT        7
        u8 flags4;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT   0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT   1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK         0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT                0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK         0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT                1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK       0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT      2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK       0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT      3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK       0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT      4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK       0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT      5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK       0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT      6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK       0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT      7
        u8 byte2;
        __le16 word1;
        __le32 reg0;
@@ -1376,18 +1551,20 @@ struct tstorm_iscsi_task_ag_ctx {
        __le32 reg1;
        __le32 reg2;
 };
+
+/* iSCSI doorbell data */
 struct iscsi_db_data {
        u8 params;
-#define ISCSI_DB_DATA_DEST_MASK         0x3
-#define ISCSI_DB_DATA_DEST_SHIFT        0
-#define ISCSI_DB_DATA_AGG_CMD_MASK      0x3
-#define ISCSI_DB_DATA_AGG_CMD_SHIFT     2
-#define ISCSI_DB_DATA_BYPASS_EN_MASK    0x1
-#define ISCSI_DB_DATA_BYPASS_EN_SHIFT   4
-#define ISCSI_DB_DATA_RESERVED_MASK     0x1
-#define ISCSI_DB_DATA_RESERVED_SHIFT    5
-#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ISCSI_DB_DATA_DEST_MASK                0x3
+#define ISCSI_DB_DATA_DEST_SHIFT       0
+#define ISCSI_DB_DATA_AGG_CMD_MASK     0x3
+#define ISCSI_DB_DATA_AGG_CMD_SHIFT    2
+#define ISCSI_DB_DATA_BYPASS_EN_MASK   0x1
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT  4
+#define ISCSI_DB_DATA_RESERVED_MASK    0x1
+#define ISCSI_DB_DATA_RESERVED_SHIFT   5
+#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT        6
        u8 agg_flags;
        __le16 sq_prod;
 };
index b8b3e1c..c6cfd39 100644 (file)
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+
 #ifndef __IWARP_COMMON__
 #define __IWARP_COMMON__
+
 #include <linux/qed/rdma_common.h>
+
 /************************/
 /* IWARP FW CONSTANTS  */
 /************************/
 #define IWARP_PASSIVE_MODE 1
 
 #define IWARP_SHARED_QUEUE_PAGE_SIZE           (0x8000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET   (0x4000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET   (0x5000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET  (0x4000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE        (0x1000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET  (0x5000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE        (0x3000)
 
-#define IWARP_REQ_MAX_INLINE_DATA_SIZE          (128)
-#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE        (176)
+#define IWARP_REQ_MAX_INLINE_DATA_SIZE         (128)
+#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE       (176)
 
-#define IWARP_MAX_QPS                           (64 * 1024)
+#define IWARP_MAX_QPS                          (64 * 1024)
 
 #endif /* __IWARP_COMMON__ */
index d60de4a..147d08c 100644 (file)
@@ -61,6 +61,35 @@ struct qed_txq_start_ret_params {
        void *p_handle;
 };
 
+enum qed_filter_config_mode {
+       QED_FILTER_CONFIG_MODE_DISABLE,
+       QED_FILTER_CONFIG_MODE_5_TUPLE,
+       QED_FILTER_CONFIG_MODE_L4_PORT,
+       QED_FILTER_CONFIG_MODE_IP_DEST,
+};
+
+struct qed_ntuple_filter_params {
+       /* Physically mapped address containing header of buffer to be used
+        * as filter.
+        */
+       dma_addr_t addr;
+
+       /* Length of header in bytes */
+       u16 length;
+
+       /* Relative queue-id to receive classified packet */
+#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)
+       u16 qid;
+
+       /* Identifier can either be according to vport-id or vfid */
+       bool b_is_vf;
+       u8 vport_id;
+       u8 vf_id;
+
+       /* true iff this filter is to be added. Else to be removed */
+       bool b_is_add;
+};
+
 struct qed_dev_eth_info {
        struct qed_dev_info common;
 
@@ -316,13 +345,12 @@ struct qed_eth_ops {
        int (*tunn_config)(struct qed_dev *cdev,
                           struct qed_tunn_params *params);
 
-       int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
-                                   dma_addr_t mapping, u16 length,
-                                   u16 vport_id, u16 rx_queue_id,
-                                   bool add_filter);
+       int (*ntuple_filter_config)(struct qed_dev *cdev,
+                                   void *cookie,
+                                   struct qed_ntuple_filter_params *params);
 
        int (*configure_arfs_searcher)(struct qed_dev *cdev,
-                                      bool en_searcher);
+                                      enum qed_filter_config_mode mode);
        int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
 };
 
index cc646ca..15e398c 100644 (file)
@@ -244,16 +244,11 @@ struct qed_fcoe_pf_params {
 /* Most of the the parameters below are described in the FW iSCSI / TCP HSI */
 struct qed_iscsi_pf_params {
        u64 glbl_q_params_addr;
-       u64 bdq_pbl_base_addr[2];
-       u32 max_cwnd;
+       u64 bdq_pbl_base_addr[3];
        u16 cq_num_entries;
        u16 cmdq_num_entries;
        u32 two_msl_timer;
-       u16 dup_ack_threshold;
        u16 tx_sws_timer;
-       u16 min_rto;
-       u16 min_rto_rt;
-       u16 max_rto;
 
        /* The following parameters are used during HW-init
         * and these parameters need to be passed as arguments
@@ -264,8 +259,8 @@ struct qed_iscsi_pf_params {
 
        /* The following parameters are used during protocol-init */
        u16 half_way_close_timeout;
-       u16 bdq_xoff_threshold[2];
-       u16 bdq_xon_threshold[2];
+       u16 bdq_xoff_threshold[3];
+       u16 bdq_xon_threshold[3];
        u16 cmdq_xoff_threshold;
        u16 cmdq_xon_threshold;
        u16 rq_buffer_size;
@@ -281,10 +276,11 @@ struct qed_iscsi_pf_params {
        u8 gl_cmd_pi;
        u8 debug_mode;
        u8 ll2_ooo_queue_id;
-       u8 ooo_enable;
 
        u8 is_target;
-       u8 bdq_pbl_num_entries[2];
+       u8 is_soc_en;
+       u8 soc_num_of_blocks_log;
+       u8 bdq_pbl_num_entries[3];
 };
 
 struct qed_rdma_pf_params {
@@ -316,16 +312,16 @@ enum qed_int_mode {
 };
 
 struct qed_sb_info {
-       struct status_block     *sb_virt;
-       dma_addr_t              sb_phys;
-       u32                     sb_ack; /* Last given ack */
-       u16                     igu_sb_id;
-       void __iomem            *igu_addr;
-       u8                      flags;
-#define QED_SB_INFO_INIT        0x1
-#define QED_SB_INFO_SETUP       0x2
+       struct status_block_e4 *sb_virt;
+       dma_addr_t sb_phys;
+       u32 sb_ack; /* Last given ack */
+       u16 igu_sb_id;
+       void __iomem *igu_addr;
+       u8 flags;
+#define QED_SB_INFO_INIT       0x1
+#define QED_SB_INFO_SETUP      0x2
 
-       struct qed_dev          *cdev;
+       struct qed_dev *cdev;
 };
 
 enum qed_dev_type {
@@ -939,7 +935,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
        u16 rc = 0;
 
        prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
-              STATUS_BLOCK_PROD_INDEX_MASK;
+              STATUS_BLOCK_E4_PROD_INDEX_MASK;
        if (sb_info->sb_ack != prod) {
                sb_info->sb_ack = prod;
                rc |= QED_SB_IDX;
index 111e606..d0df1be 100644 (file)
@@ -102,7 +102,6 @@ struct qed_iscsi_params_offload {
        u32 ss_thresh;
        u16 srtt;
        u16 rtt_var;
-       u32 ts_time;
        u32 ts_recent;
        u32 ts_recent_age;
        u32 total_rt;
@@ -124,7 +123,6 @@ struct qed_iscsi_params_offload {
        u16 mss;
        u8 snd_wnd_scale;
        u8 rcv_wnd_scale;
-       u32 ts_ticks_per_second;
        u16 da_timeout_value;
        u8 ack_frequency;
 };
index e755954..266c1fb 100644 (file)
@@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data {
        u32 opaque_data_1;
 
        /* GSI only */
-       u32 gid_dst[4];
+       u32 src_qp;
        u16 qp_id;
 
        union {
index a9b3050..c1a446e 100644 (file)
 
 #ifndef __RDMA_COMMON__
 #define __RDMA_COMMON__
+
 /************************/
 /* RDMA FW CONSTANTS */
 /************************/
 
-#define RDMA_RESERVED_LKEY                      (0)
-#define RDMA_RING_PAGE_SIZE                     (0x1000)
+#define RDMA_RESERVED_LKEY             (0)
+#define RDMA_RING_PAGE_SIZE            (0x1000)
 
-#define RDMA_MAX_SGE_PER_SQ_WQE         (4)
-#define RDMA_MAX_SGE_PER_RQ_WQE         (4)
+#define RDMA_MAX_SGE_PER_SQ_WQE                (4)
+#define RDMA_MAX_SGE_PER_RQ_WQE                (4)
 
 #define RDMA_MAX_DATA_SIZE_IN_WQE      (0x80000000)
 
-#define RDMA_REQ_RD_ATOMIC_ELM_SIZE             (0x50)
-#define RDMA_RESP_RD_ATOMIC_ELM_SIZE    (0x20)
+#define RDMA_REQ_RD_ATOMIC_ELM_SIZE    (0x50)
+#define RDMA_RESP_RD_ATOMIC_ELM_SIZE   (0x20)
 
-#define RDMA_MAX_CQS                            (64 * 1024)
-#define RDMA_MAX_TIDS                           (128 * 1024 - 1)
-#define RDMA_MAX_PDS                            (64 * 1024)
+#define RDMA_MAX_CQS                   (64 * 1024)
+#define RDMA_MAX_TIDS                  (128 * 1024 - 1)
+#define RDMA_MAX_PDS                   (64 * 1024)
 
-#define RDMA_NUM_STATISTIC_COUNTERS                     MAX_NUM_VPORTS
-#define RDMA_NUM_STATISTIC_COUNTERS_K2                  MAX_NUM_VPORTS_K2
-#define RDMA_NUM_STATISTIC_COUNTERS_BB                  MAX_NUM_VPORTS_BB
+#define RDMA_NUM_STATISTIC_COUNTERS    MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
 
 #define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
 
index fe6a33e..e15e0da 100644 (file)
 #ifndef __ROCE_COMMON__
 #define __ROCE_COMMON__
 
-#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
-#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+/************************/
+/* ROCE FW CONSTANTS */
+/************************/
 
-#define ROCE_MAX_QPS   (32 * 1024)
-#define ROCE_DCQCN_NP_MAX_QPS  (64)
-#define ROCE_DCQCN_RP_MAX_QPS  (64)
+#define ROCE_REQ_MAX_INLINE_DATA_SIZE  (256)
+#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE        (288)
 
+#define ROCE_MAX_QPS                   (32 * 1024)
+#define ROCE_DCQCN_NP_MAX_QPS          (64)
+#define ROCE_DCQCN_RP_MAX_QPS          (64)
+
+/* Affiliated asynchronous events / errors enumeration */
 enum roce_async_events_type {
        ROCE_ASYNC_EVENT_NONE = 0,
        ROCE_ASYNC_EVENT_COMM_EST = 1,
index 08df82a..505c0b4 100644 (file)
 #ifndef __STORAGE_COMMON__
 #define __STORAGE_COMMON__
 
-#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
-#define BDQ_NUM_RESOURCES (4)
-
-#define BDQ_ID_RQ                        (0)
-#define BDQ_ID_IMM_DATA          (1)
-#define BDQ_NUM_IDS          (2)
-
-#define SCSI_NUM_SGES_SLOW_SGL_THR      8
+/*********************/
+/* SCSI CONSTANTS */
+/*********************/
+
+#define SCSI_MAX_NUM_OF_CMDQS          (NUM_OF_GLOBAL_QUEUES / 2)
+#define BDQ_NUM_RESOURCES              (4)
+
+#define BDQ_ID_RQ                      (0)
+#define BDQ_ID_IMM_DATA                        (1)
+#define BDQ_ID_TQ                      (2)
+#define BDQ_NUM_IDS                    (3)
+
+#define SCSI_NUM_SGES_SLOW_SGL_THR     8
+
+#define BDQ_MAX_EXTERNAL_RING_SIZE     BIT(15)
+
+/* SCSI op codes */
+#define SCSI_OPCODE_COMPARE_AND_WRITE  (0x89)
+#define SCSI_OPCODE_READ_10            (0x28)
+#define SCSI_OPCODE_WRITE_6            (0x0A)
+#define SCSI_OPCODE_WRITE_10           (0x2A)
+#define SCSI_OPCODE_WRITE_12           (0xAA)
+#define SCSI_OPCODE_WRITE_16           (0x8A)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_10        (0x2E)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_12        (0xAE)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_16        (0x8E)
+
+/* iSCSI Drv opaque */
+struct iscsi_drv_opaque {
+       __le16 reserved_zero[3];
+       __le16 opaque;
+};
 
-#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
+/* Scsi 2B/8B opaque union */
+union scsi_opaque {
+       struct regpair fcoe_opaque;
+       struct iscsi_drv_opaque iscsi_opaque;
+};
 
+/* SCSI buffer descriptor */
 struct scsi_bd {
        struct regpair address;
-       struct regpair opaque;
+       union scsi_opaque opaque;
 };
 
+/* Scsi Drv BDQ struct */
 struct scsi_bdq_ram_drv_data {
        __le16 external_producer;
        __le16 reserved0[3];
 };
 
+/* SCSI SGE entry */
 struct scsi_sge {
        struct regpair sge_addr;
        __le32 sge_len;
        __le32 reserved;
 };
 
+/* Cached SGEs section */
 struct scsi_cached_sges {
        struct scsi_sge sge[4];
 };
 
+/* Scsi Drv CMDQ struct */
 struct scsi_drv_cmdq {
        __le16 cmdq_cons;
        __le16 reserved0;
        __le32 reserved1;
 };
 
+/* Common SCSI init params passed by driver to FW in function init ramrod */
 struct scsi_init_func_params {
        __le16 num_tasks;
        u8 log_page_size;
@@ -77,6 +111,7 @@ struct scsi_init_func_params {
        u8 reserved2[12];
 };
 
+/* SCSI RQ/CQ/CMDQ firmware function init parameters */
 struct scsi_init_func_queues {
        struct regpair glbl_q_params_addr;
        __le16 rq_buffer_size;
@@ -84,39 +119,45 @@ struct scsi_init_func_queues {
        __le16 cmdq_num_entries;
        u8 bdq_resource_id;
        u8 q_validity;
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK        0x1
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT       0
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK  0x1
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK       0x1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT      2
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK  0x1F
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK                    0x1
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT                   0
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK              0x1
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT             1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK                   0x1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT                  2
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK                    0x1
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT                   3
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK                      0x1
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT                     4
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK       0x7
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT      5
+       __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS];
        u8 num_queues;
        u8 queue_relative_offset;
        u8 cq_sb_pi;
        u8 cmdq_sb_pi;
-       __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
-       __le16 reserved0;
        u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
+       u8 reserved1;
        struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
        __le16 bdq_xoff_threshold[BDQ_NUM_IDS];
-       __le16 bdq_xon_threshold[BDQ_NUM_IDS];
        __le16 cmdq_xoff_threshold;
+       __le16 bdq_xon_threshold[BDQ_NUM_IDS];
        __le16 cmdq_xon_threshold;
-       __le32 reserved1;
 };
 
+/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */
 struct scsi_ram_per_bdq_resource_drv_data {
        struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
 };
 
+/* SCSI SGL types */
 enum scsi_sgl_mode {
        SCSI_TX_SLOW_SGL,
        SCSI_FAST_SGL,
        MAX_SCSI_SGL_MODE
 };
 
+/* SCSI SGL parameters */
 struct scsi_sgl_params {
        struct regpair sgl_addr;
        __le32 sgl_total_length;
@@ -126,10 +167,16 @@ struct scsi_sgl_params {
        u8 reserved;
 };
 
+/* SCSI terminate connection params */
 struct scsi_terminate_extra_params {
        __le16 unsolicited_cq_count;
        __le16 cmdq_count;
        u8 reserved[4];
 };
 
+/* SCSI Task Queue Element */
+struct scsi_tqe {
+       __le16 itid;
+};
+
 #endif /* __STORAGE_COMMON__ */
index dbf7a43..4a48451 100644 (file)
 #ifndef __TCP_COMMON__
 #define __TCP_COMMON__
 
-#define TCP_INVALID_TIMEOUT_VAL -1
+/********************/
+/* TCP FW CONSTANTS */
+/********************/
 
+#define TCP_INVALID_TIMEOUT_VAL        -1
+
+/* OOO opaque data received from LL2 */
 struct ooo_opaque {
        __le32 cid;
        u8 drop_isle;
@@ -43,25 +48,29 @@ struct ooo_opaque {
        u8 ooo_isle;
 };
 
+/* tcp connect mode enum */
 enum tcp_connect_mode {
        TCP_CONNECT_ACTIVE,
        TCP_CONNECT_PASSIVE,
        MAX_TCP_CONNECT_MODE
 };
 
+/* tcp function init parameters */
 struct tcp_init_params {
        __le32 two_msl_timer;
        __le16 tx_sws_timer;
-       u8 maxfinrt;
+       u8 max_fin_rt;
        u8 reserved[9];
 };
 
+/* tcp IPv4/IPv6 enum */
 enum tcp_ip_version {
        TCP_IPV4,
        TCP_IPV6,
        MAX_TCP_IP_VERSION
 };
 
+/* tcp offload parameters */
 struct tcp_offload_params {
        __le16 local_mac_addr_lo;
        __le16 local_mac_addr_mid;
@@ -70,24 +79,29 @@ struct tcp_offload_params {
        __le16 remote_mac_addr_mid;
        __le16 remote_mac_addr_hi;
        __le16 vlan_id;
-       u8 flags;
-#define TCP_OFFLOAD_PARAMS_TS_EN_MASK         0x1
-#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT        0
-#define TCP_OFFLOAD_PARAMS_DA_EN_MASK         0x1
-#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT        1
-#define TCP_OFFLOAD_PARAMS_KA_EN_MASK         0x1
-#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT        2
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT     3
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK     0x1
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT    4
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT     5
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK  0x1
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
-#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK     0x1
-#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT    7
+       __le16 flags;
+#define TCP_OFFLOAD_PARAMS_TS_EN_MASK                  0x1
+#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT                 0
+#define TCP_OFFLOAD_PARAMS_DA_EN_MASK                  0x1
+#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT                 1
+#define TCP_OFFLOAD_PARAMS_KA_EN_MASK                  0x1
+#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT                 2
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK          0x1
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT         3
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK                0x1
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT       4
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK               0x1
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT              5
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK              0x1
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT             6
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK               0x1
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT              7
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK           0x1
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT          8
+#define TCP_OFFLOAD_PARAMS_RESERVED_MASK               0x7F
+#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT              9
        u8 ip_version;
+       u8 reserved0[3];
        __le32 remote_ip[4];
        __le32 local_ip[4];
        __le32 flow_label;
@@ -99,17 +113,21 @@ struct tcp_offload_params {
        u8 rcv_wnd_scale;
        u8 connect_mode;
        __le16 srtt;
-       __le32 cwnd;
        __le32 ss_thresh;
-       __le16 reserved1;
+       __le32 rcv_wnd;
+       __le32 cwnd;
        u8 ka_max_probe_cnt;
        u8 dup_ack_theshold;
+       __le16 reserved1;
+       __le32 ka_timeout;
+       __le32 ka_interval;
+       __le32 max_rt_time;
+       __le32 initial_rcv_wnd;
        __le32 rcv_next;
        __le32 snd_una;
        __le32 snd_next;
        __le32 snd_max;
        __le32 snd_wnd;
-       __le32 rcv_wnd;
        __le32 snd_wl1;
        __le32 ts_recent;
        __le32 ts_recent_age;
@@ -122,16 +140,13 @@ struct tcp_offload_params {
        u8 rt_cnt;
        __le16 rtt_var;
        __le16 fw_internal;
-       __le32 ka_timeout;
-       __le32 ka_interval;
-       __le32 max_rt_time;
-       __le32 initial_rcv_wnd;
        u8 snd_wnd_scale;
        u8 ack_frequency;
        __le16 da_timeout_value;
-       __le32 reserved3[2];
+       __le32 reserved3;
 };
 
+/* tcp offload parameters */
 struct tcp_offload_params_opt2 {
        __le16 local_mac_addr_lo;
        __le16 local_mac_addr_mid;
@@ -140,16 +155,19 @@ struct tcp_offload_params_opt2 {
        __le16 remote_mac_addr_mid;
        __le16 remote_mac_addr_hi;
        __le16 vlan_id;
-       u8 flags;
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT     0
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT     1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT     2
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK  0x1F
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
+       __le16 flags;
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK     0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT    0
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK     0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT    1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK     0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT    2
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK    0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT   3
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT        4
        u8 ip_version;
+       u8 reserved1[3];
        __le32 remote_ip[4];
        __le32 local_ip[4];
        __le32 flow_label;
@@ -163,9 +181,16 @@ struct tcp_offload_params_opt2 {
        __le16 syn_ip_payload_length;
        __le32 syn_phy_addr_lo;
        __le32 syn_phy_addr_hi;
-       __le32 reserved1[22];
+       __le32 cwnd;
+       u8 ka_max_probe_cnt;
+       u8 reserved2[3];
+       __le32 ka_timeout;
+       __le32 ka_interval;
+       __le32 max_rt_time;
+       __le32 reserved3[16];
 };
 
+/* tcp IPv4/IPv6 enum */
 enum tcp_seg_placement_event {
        TCP_EVENT_ADD_PEN,
        TCP_EVENT_ADD_NEW_ISLE,
@@ -177,40 +202,41 @@ enum tcp_seg_placement_event {
        MAX_TCP_SEG_PLACEMENT_EVENT
 };
 
+/* tcp init parameters */
 struct tcp_update_params {
        __le16 flags;
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK   0x1
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT  0
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK               0x1
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT              1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK               0x1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT              2
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK         0x1
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT        3
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK        0x1
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT       4
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK       0x1
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT      5
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK       0x1
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT      6
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK        0x1
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT       7
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK   0x1
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT  8
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK  0x1
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK             0x1
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT            10
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK          0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT         11
-#define TCP_UPDATE_PARAMS_KA_EN_MASK                     0x1
-#define TCP_UPDATE_PARAMS_KA_EN_SHIFT                    12
-#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK                  0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT                 13
-#define TCP_UPDATE_PARAMS_KA_RESTART_MASK                0x1
-#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT               14
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK        0x1
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT       15
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK         0x1
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT                0
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK                     0x1
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT                    1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK                     0x1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT                    2
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK               0x1
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT              3
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK              0x1
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT             4
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK             0x1
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT            5
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK             0x1
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT            6
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK              0x1
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT             7
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK         0x1
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT                8
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK                0x1
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT       9
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK                   0x1
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT                  10
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK                        0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT               11
+#define TCP_UPDATE_PARAMS_KA_EN_MASK                           0x1
+#define TCP_UPDATE_PARAMS_KA_EN_SHIFT                          12
+#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK                                0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT                       13
+#define TCP_UPDATE_PARAMS_KA_RESTART_MASK                      0x1
+#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT                     14
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK              0x1
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT             15
        __le16 remote_mac_addr_lo;
        __le16 remote_mac_addr_mid;
        __le16 remote_mac_addr_hi;
@@ -226,6 +252,7 @@ struct tcp_update_params {
        u8 reserved1[7];
 };
 
+/* toe upload parameters */
 struct tcp_upload_params {
        __le32 rcv_next;
        __le32 snd_una;
index 2032ce2..62d508b 100644 (file)
@@ -97,13 +97,9 @@ void rtnetlink_init(void);
 void __rtnl_unlock(void);
 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
 
-#define ASSERT_RTNL() do { \
-       if (unlikely(!rtnl_is_locked())) { \
-               printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
-                      __FILE__,  __LINE__); \
-               dump_stack(); \
-       } \
-} while(0)
+#define ASSERT_RTNL() \
+       WARN_ONCE(!rtnl_is_locked(), \
+                 "RTNL: assertion failed at %s (%d)\n", __FILE__,  __LINE__)
 
 extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
                             struct netlink_callback *cb,
index 0c5c5f6..e724d5a 100644 (file)
@@ -165,7 +165,41 @@ struct sfp_eeprom_base {
        char vendor_rev[4];
        union {
                __be16 optical_wavelength;
-               u8 cable_spec;
+               __be16 cable_compliance;
+               struct {
+#if defined __BIG_ENDIAN_BITFIELD
+                       u8 reserved60_2:6;
+                       u8 fc_pi_4_app_h:1;
+                       u8 sff8431_app_e:1;
+                       u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+                       u8 sff8431_app_e:1;
+                       u8 fc_pi_4_app_h:1;
+                       u8 reserved60_2:6;
+                       u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+               } __packed passive;
+               struct {
+#if defined __BIG_ENDIAN_BITFIELD
+                       u8 reserved60_4:4;
+                       u8 fc_pi_4_lim:1;
+                       u8 sff8431_lim:1;
+                       u8 fc_pi_4_app_h:1;
+                       u8 sff8431_app_e:1;
+                       u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+                       u8 sff8431_app_e:1;
+                       u8 fc_pi_4_app_h:1;
+                       u8 sff8431_lim:1;
+                       u8 fc_pi_4_lim:1;
+                       u8 reserved60_4:4;
+                       u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+               } __packed active;
        } __packed;
        u8 reserved62;
        u8 cc_base;
index 7b2170b..bc6bb32 100644 (file)
@@ -126,7 +126,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
  *     for that name.  This appears in the sysfs "modalias" attribute
  *     for driver coldplugging, and in uevents used for hotplugging
  * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
- *     when not using a GPIO line)
+ *     not using a GPIO line)
  *
  * @statistics: statistics for the spi_device
  *
index 8b8118a..ab30a22 100644 (file)
@@ -1773,6 +1773,8 @@ enum cfg80211_signal_type {
  *     by %parent_bssid.
  * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to
  *     the BSS that requested the scan in which the beacon/probe was received.
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
  */
 struct cfg80211_inform_bss {
        struct ieee80211_channel *chan;
@@ -1781,6 +1783,8 @@ struct cfg80211_inform_bss {
        u64 boottime_ns;
        u64 parent_tsf;
        u8 parent_bssid[ETH_ALEN] __aligned(2);
+       u8 chains;
+       s8 chain_signal[IEEE80211_MAX_CHAINS];
 };
 
 /**
@@ -1824,6 +1828,8 @@ struct cfg80211_bss_ies {
  *     that holds the beacon data. @beacon_ies is still valid, of course, and
  *     points to the same data as hidden_beacon_bss->beacon_ies in that case.
  * @signal: signal strength value (type depends on the wiphy's signal_type)
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
  * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
  */
 struct cfg80211_bss {
@@ -1842,6 +1848,8 @@ struct cfg80211_bss {
        u16 capability;
 
        u8 bssid[ETH_ALEN];
+       u8 chains;
+       s8 chain_signal[IEEE80211_MAX_CHAINS];
 
        u8 priv[0] __aligned(sizeof(void *));
 };
@@ -2021,6 +2029,9 @@ struct cfg80211_disassoc_request {
  * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
  *     will be used in ht_capa.  Un-supported values will be ignored.
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
+ * @wep_keys: static WEP keys, if not NULL points to an array of
+ *     CFG80211_MAX_WEP_KEYS WEP keys
+ * @wep_tx_key: key index (0..3) of the default TX static WEP key
  */
 struct cfg80211_ibss_params {
        const u8 *ssid;
@@ -2037,6 +2048,8 @@ struct cfg80211_ibss_params {
        int mcast_rate[NUM_NL80211_BANDS];
        struct ieee80211_ht_cap ht_capa;
        struct ieee80211_ht_cap ht_capa_mask;
+       struct key_params *wep_keys;
+       int wep_tx_key;
 };
 
 /**
@@ -3226,7 +3239,6 @@ struct cfg80211_ops {
  * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN.
  * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
  *     auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
- * @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans.
  * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the
  *     firmware.
  * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP.
@@ -5576,7 +5588,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
  * cfg80211_rx_mgmt - notification of received, unprocessed management frame
  * @wdev: wireless device receiving the frame
  * @freq: Frequency on which the frame was received in MHz
- * @sig_dbm: signal strength in mBm, or 0 if unknown
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
  * @buf: Management frame (header + body)
  * @len: length of the frame data
  * @flags: flags, as defined in enum nl80211_rxmgmt_flags
@@ -5755,7 +5767,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
  * @frame: the frame
  * @len: length of the frame
  * @freq: frequency the frame was received on
- * @sig_dbm: signal strength in mBm, or 0 if unknown
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
  *
  * Use this function to report to userspace when a beacon was
  * received. It is not useful to call this when there is no
index 39efb96..0a671c3 100644 (file)
@@ -291,6 +291,31 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 
 int inet_sk_rebuild_header(struct sock *sk);
 
+/**
+ * inet_sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with inet_sk_state_store(). Used in places we don't hold socket lock:
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int inet_sk_state_load(const struct sock *sk)
+{
+       /* state change might impact lockless readers. */
+       return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * inet_sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with inet_sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+void inet_sk_state_store(struct sock *sk, int newstate);
+
+void inet_sk_set_state(struct sock *sk, int state);
+
 static inline unsigned int __inet_ehashfn(const __be32 laddr,
                                          const __u16 lport,
                                          const __be32 faddr,
index 44d96a9..ddf53dd 100644 (file)
@@ -173,7 +173,8 @@ struct rt6_info {
        unsigned short                  rt6i_nfheader_len;
        u8                              rt6i_protocol;
        u8                              exception_bucket_flushed:1,
-                                       unused:7;
+                                       should_flush:1,
+                                       unused:6;
 };
 
 #define for_each_fib6_node_rt_rcu(fn)                                  \
@@ -404,6 +405,7 @@ unsigned int fib6_tables_seq_read(struct net *net);
 int fib6_tables_dump(struct net *net, struct notifier_block *nb);
 
 void fib6_update_sernum(struct rt6_info *rt);
+void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 int fib6_rules_init(void);
index 18e442e..34cd3b0 100644 (file)
@@ -165,10 +165,12 @@ struct rt6_rtnl_dump_arg {
 };
 
 int rt6_dump_route(struct rt6_info *rt, void *p_arg);
-void rt6_ifdown(struct net *net, struct net_device *dev);
 void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
+void rt6_sync_up(struct net_device *dev, unsigned int nh_flags);
+void rt6_disable_ip(struct net_device *dev, unsigned long event);
+void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
 
 static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
 {
index eec143c..906e902 100644 (file)
@@ -1552,6 +1552,9 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
  * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
  *     driver for a key to indicate that sufficient tailroom must always
  *     be reserved for ICV or MIC, even when HW encryption is enabled.
+ * @IEEE80211_KEY_FLAG_PUT_MIC_SPACE: This flag should be set by the driver for
+ *     a TKIP key if it only requires MIC space. Do not set together with
+ *     @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key.
  */
 enum ieee80211_key_flags {
        IEEE80211_KEY_FLAG_GENERATE_IV_MGMT     = BIT(0),
@@ -1562,6 +1565,7 @@ enum ieee80211_key_flags {
        IEEE80211_KEY_FLAG_PUT_IV_SPACE         = BIT(5),
        IEEE80211_KEY_FLAG_RX_MGMT              = BIT(6),
        IEEE80211_KEY_FLAG_RESERVE_TAILROOM     = BIT(7),
+       IEEE80211_KEY_FLAG_PUT_MIC_SPACE        = BIT(8),
 };
 
 /**
@@ -1593,8 +1597,8 @@ struct ieee80211_key_conf {
        u8 icv_len;
        u8 iv_len;
        u8 hw_key_idx;
-       u8 flags;
        s8 keyidx;
+       u16 flags;
        u8 keylen;
        u8 key[0];
 };
@@ -2056,6 +2060,9 @@ struct ieee80211_txq {
  *     The stack will not do fragmentation.
  *     The callback for @set_frag_threshold should be set as well.
  *
+ * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
+ *     TDLS links.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -2098,6 +2105,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_TX_FRAG_LIST,
        IEEE80211_HW_REPORTS_LOW_ACK,
        IEEE80211_HW_SUPPORTS_TX_FRAG,
+       IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
 
        /* keep last, obviously */
        NUM_IEEE80211_HW_FLAGS
index 0105445..c4f4e46 100644 (file)
@@ -39,9 +39,11 @@ struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                bool create);
 void tcf_chain_put(struct tcf_chain *chain);
 int tcf_block_get(struct tcf_block **p_block,
-                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q);
+                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+                 struct netlink_ext_ack *extack);
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-                     struct tcf_block_ext_info *ei);
+                     struct tcf_block_ext_info *ei,
+                     struct netlink_ext_ack *extack);
 void tcf_block_put(struct tcf_block *block);
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei);
@@ -77,14 +79,16 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 #else
 static inline
 int tcf_block_get(struct tcf_block **p_block,
-                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+                 struct netlink_ext_ack *extack)
 {
        return 0;
 }
 
 static inline
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-                     struct tcf_block_ext_info *ei)
+                     struct tcf_block_ext_info *ei,
+                     struct netlink_ext_ack *extack)
 {
        return 0;
 }
@@ -694,9 +698,7 @@ struct tc_cls_matchall_offload {
 };
 
 enum tc_clsbpf_command {
-       TC_CLSBPF_ADD,
-       TC_CLSBPF_REPLACE,
-       TC_CLSBPF_DESTROY,
+       TC_CLSBPF_OFFLOAD,
        TC_CLSBPF_STATS,
 };
 
@@ -705,6 +707,7 @@ struct tc_cls_bpf_offload {
        enum tc_clsbpf_command command;
        struct tcf_exts *exts;
        struct bpf_prog *prog;
+       struct bpf_prog *oldprog;
        const char *name;
        bool exts_integrated;
        u32 gen_flags;
index 2404692..e2c75f5 100644 (file)
@@ -89,7 +89,8 @@ extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
 
 int fifo_set_limit(struct Qdisc *q, unsigned int limit);
 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
-                              unsigned int limit);
+                              unsigned int limit,
+                              struct netlink_ext_ack *extack);
 
 int register_qdisc(struct Qdisc_ops *qops);
 int unregister_qdisc(struct Qdisc_ops *qops);
@@ -101,7 +102,8 @@ void qdisc_hash_del(struct Qdisc *q);
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
 struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
-                                       struct nlattr *tab);
+                                       struct nlattr *tab,
+                                       struct netlink_ext_ack *extack);
 void qdisc_put_rtab(struct qdisc_rate_table *tab);
 void qdisc_put_stab(struct qdisc_size_table *tab);
 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
index bc6b25f..ac029d5 100644 (file)
@@ -151,20 +151,23 @@ struct Qdisc_class_ops {
        /* Child qdisc manipulation */
        struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
        int                     (*graft)(struct Qdisc *, unsigned long cl,
-                                       struct Qdisc *, struct Qdisc **);
+                                       struct Qdisc *, struct Qdisc **,
+                                       struct netlink_ext_ack *extack);
        struct Qdisc *          (*leaf)(struct Qdisc *, unsigned long cl);
        void                    (*qlen_notify)(struct Qdisc *, unsigned long);
 
        /* Class manipulation routines */
        unsigned long           (*find)(struct Qdisc *, u32 classid);
        int                     (*change)(struct Qdisc *, u32, u32,
-                                       struct nlattr **, unsigned long *);
+                                       struct nlattr **, unsigned long *,
+                                       struct netlink_ext_ack *);
        int                     (*delete)(struct Qdisc *, unsigned long);
        void                    (*walk)(struct Qdisc *, struct qdisc_walker * arg);
 
        /* Filter manipulation */
        struct tcf_block *      (*tcf_block)(struct Qdisc *sch,
-                                            unsigned long arg);
+                                            unsigned long arg,
+                                            struct netlink_ext_ack *extack);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                        u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -189,11 +192,13 @@ struct Qdisc_ops {
        struct sk_buff *        (*dequeue)(struct Qdisc *);
        struct sk_buff *        (*peek)(struct Qdisc *);
 
-       int                     (*init)(struct Qdisc *sch, struct nlattr *arg);
+       int                     (*init)(struct Qdisc *sch, struct nlattr *arg,
+                                       struct netlink_ext_ack *extack);
        void                    (*reset)(struct Qdisc *);
        void                    (*destroy)(struct Qdisc *);
        int                     (*change)(struct Qdisc *sch,
-                                         struct nlattr *arg);
+                                         struct nlattr *arg,
+                                         struct netlink_ext_ack *extack);
        void                    (*attach)(struct Qdisc *sch);
 
        int                     (*dump)(struct Qdisc *, struct sk_buff *);
@@ -466,9 +471,11 @@ void qdisc_destroy(struct Qdisc *qdisc);
 void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
                               unsigned int len);
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
-                         const struct Qdisc_ops *ops);
+                         const struct Qdisc_ops *ops,
+                         struct netlink_ext_ack *extack);
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
-                               const struct Qdisc_ops *ops, u32 parentid);
+                               const struct Qdisc_ops *ops, u32 parentid,
+                               struct netlink_ext_ack *extack);
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
index 0a32f3c..66fd395 100644 (file)
@@ -1515,6 +1515,11 @@ static inline bool sock_owned_by_user(const struct sock *sk)
        return sk->sk_lock.owned;
 }
 
+static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
+{
+       return sk->sk_lock.owned;
+}
+
 /* no reclassification while locks are held */
 static inline bool sock_allow_reclassification(const struct sock *csk)
 {
@@ -2333,31 +2338,6 @@ static inline bool sk_listener(const struct sock *sk)
        return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
 }
 
-/**
- * sk_state_load - read sk->sk_state for lockless contexts
- * @sk: socket pointer
- *
- * Paired with sk_state_store(). Used in places we do not hold socket lock :
- * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
- */
-static inline int sk_state_load(const struct sock *sk)
-{
-       return smp_load_acquire(&sk->sk_state);
-}
-
-/**
- * sk_state_store - update sk->sk_state
- * @sk: socket pointer
- * @newstate: new state
- *
- * Paired with sk_state_load(). Should be used in contexts where
- * state change might impact lockless readers.
- */
-static inline void sk_state_store(struct sock *sk, int newstate)
-{
-       smp_store_release(&sk->sk_state, newstate);
-}
-
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
diff --git a/include/net/xdp.h b/include/net/xdp.h
new file mode 100644 (file)
index 0000000..b2362dd
--- /dev/null
@@ -0,0 +1,48 @@
+/* include/net/xdp.h
+ *
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ * Released under terms in GPL version 2.  See COPYING.
+ */
+#ifndef __LINUX_NET_XDP_H__
+#define __LINUX_NET_XDP_H__
+
+/**
+ * DOC: XDP RX-queue information
+ *
+ * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
+ * level RX-ring queues.  It is information that is specific to how
+ * the driver has configured a given RX-ring queue.
+ *
+ * Each xdp_buff frame received in the driver carries a (pointer)
+ * reference to this xdp_rxq_info structure.  This provides the XDP
+ * data-path read-access to RX-info for both kernel and bpf-side
+ * (limited subset).
+ *
+ * For now, direct access is only safe while running in NAPI/softirq
+ * context.  Contents are read-mostly and must not be updated during
+ * driver NAPI/softirq poll.
+ *
+ * The driver usage API is a register and unregister API.
+ *
+ * The struct is not directly tied to the XDP prog.  A new XDP prog
+ * can be attached as long as it doesn't change the underlying
+ * RX-ring.  If the RX-ring does change significantly, the NIC driver
+ * naturally need to stop the RX-ring before purging and reallocating
+ * memory.  In that process the driver MUST call unregistor (which
+ * also apply for driver shutdown and unload).  The register API is
+ * also mandatory during RX-ring setup.
+ */
+
+struct xdp_rxq_info {
+       struct net_device *dev;
+       u32 queue_index;
+       u32 reg_state;
+} ____cacheline_aligned; /* perf critical, avoid false-sharing */
+
+int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+                    struct net_device *dev, u32 queue_index);
+void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
+void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
+bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
+
+#endif /* __LINUX_NET_XDP_H__ */
index 1ec0c47..2e6d4fe 100644 (file)
@@ -1051,6 +1051,7 @@ struct xfrm_offload {
 #define        XFRM_GSO_SEGMENT        16
 #define        XFRM_GRO                32
 #define        XFRM_ESP_NO_TRAILER     64
+#define        XFRM_DEV_RESUME         128
 
        __u32                   status;
 #define CRYPTO_SUCCESS                         1
@@ -1600,6 +1601,9 @@ int xfrm_init_state(struct xfrm_state *x);
 int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_trans_queue(struct sk_buff *skb,
+                    int (*finish)(struct net *, struct sock *,
+                                  struct sk_buff *));
 int xfrm_output_resume(struct sk_buff *skb, int err);
 int xfrm_output(struct sock *sk, struct sk_buff *skb);
 int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
@@ -1874,21 +1878,28 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
 {
        return skb->sp->xvec[skb->sp->len - 1];
 }
+#endif
+
 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
 {
+#ifdef CONFIG_XFRM
        struct sec_path *sp = skb->sp;
 
        if (!sp || !sp->olen || sp->len != sp->olen)
                return NULL;
 
        return &sp->ovec[sp->olen - 1];
-}
+#else
+       return NULL;
 #endif
+}
 
 void __net_init xfrm_dev_init(void);
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+void xfrm_dev_resume(struct sk_buff *skb);
+void xfrm_dev_backlog(struct softnet_data *sd);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo);
 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
@@ -1902,6 +1913,8 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
                return false;
 
        xdst = (struct xfrm_dst *) dst;
+       if (!x->xso.offload_handle && !xdst->child->xfrm)
+               return true;
        if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
            !xdst->child->xfrm)
                return true;
@@ -1923,15 +1936,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
         struct net_device *dev = xso->dev;
 
        if (dev && dev->xfrmdev_ops) {
-               dev->xfrmdev_ops->xdo_dev_state_free(x);
+               if (dev->xfrmdev_ops->xdo_dev_state_free)
+                       dev->xfrmdev_ops->xdo_dev_state_free(x);
                xso->dev = NULL;
                dev_put(dev);
        }
 }
 #else
-static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+static inline void xfrm_dev_resume(struct sk_buff *skb)
 {
-       return 0;
+}
+
+static inline void xfrm_dev_backlog(struct softnet_data *sd)
+{
+}
+
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
+{
+       return skb;
 }
 
 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
index 7586072..2cd4493 100644 (file)
@@ -134,12 +134,12 @@ DECLARE_EVENT_CLASS(clk_parent,
 
        TP_STRUCT__entry(
                __string(        name,           core->name                )
-               __string(        pname,          parent->name              )
+               __string(        pname, parent ? parent->name : "none"     )
        ),
 
        TP_fast_assign(
                __assign_str(name, core->name);
-               __assign_str(pname, parent->name);
+               __assign_str(pname, parent ? parent->name : "none");
        ),
 
        TP_printk("%s %s", __get_str(name), __get_str(pname))
index e4b0b8e..2c735a3 100644 (file)
@@ -211,7 +211,7 @@ TRACE_EVENT(kvm_ack_irq,
        { KVM_TRACE_MMIO_WRITE, "write" }
 
 TRACE_EVENT(kvm_mmio,
-       TP_PROTO(int type, int len, u64 gpa, u64 val),
+       TP_PROTO(int type, int len, u64 gpa, void *val),
        TP_ARGS(type, len, gpa, val),
 
        TP_STRUCT__entry(
@@ -225,7 +225,10 @@ TRACE_EVENT(kvm_mmio,
                __entry->type           = type;
                __entry->len            = len;
                __entry->gpa            = gpa;
-               __entry->val            = val;
+               __entry->val            = 0;
+               if (val)
+                       memcpy(&__entry->val, val,
+                              min_t(u32, sizeof(__entry->val), len));
        ),
 
        TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
diff --git a/include/trace/events/net_probe_common.h b/include/trace/events/net_probe_common.h
new file mode 100644 (file)
index 0000000..3930119
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(_TRACE_NET_PROBE_COMMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NET_PROBE_COMMON_H
+
+#define TP_STORE_ADDR_PORTS_V4(__entry, inet, sk)                      \
+       do {                                                            \
+               struct sockaddr_in *v4 = (void *)__entry->saddr;        \
+                                                                       \
+               v4->sin_family = AF_INET;                               \
+               v4->sin_port = inet->inet_sport;                        \
+               v4->sin_addr.s_addr = inet->inet_saddr;                 \
+               v4 = (void *)__entry->daddr;                            \
+               v4->sin_family = AF_INET;                               \
+               v4->sin_port = inet->inet_dport;                        \
+               v4->sin_addr.s_addr = inet->inet_daddr;                 \
+       } while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+#define TP_STORE_ADDR_PORTS(__entry, inet, sk)                         \
+       do {                                                            \
+               if (sk->sk_family == AF_INET6) {                        \
+                       struct sockaddr_in6 *v6 = (void *)__entry->saddr; \
+                                                                       \
+                       v6->sin6_family = AF_INET6;                     \
+                       v6->sin6_port = inet->inet_sport;               \
+                       v6->sin6_addr = inet6_sk(sk)->saddr;            \
+                       v6 = (void *)__entry->daddr;                    \
+                       v6->sin6_family = AF_INET6;                     \
+                       v6->sin6_port = inet->inet_dport;               \
+                       v6->sin6_addr = sk->sk_v6_daddr;                \
+               } else                                                  \
+                       TP_STORE_ADDR_PORTS_V4(__entry, inet, sk);      \
+       } while (0)
+
+#else
+
+#define TP_STORE_ADDR_PORTS(__entry, inet, sk)         \
+       TP_STORE_ADDR_PORTS_V4(__entry, inet, sk);
+
+#endif
+
+#endif
diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
new file mode 100644 (file)
index 0000000..7475c7b
--- /dev/null
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sctp
+
+#if !defined(_TRACE_SCTP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCTP_H
+
+#include <net/sctp/structs.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sctp_probe_path,
+
+       TP_PROTO(struct sctp_transport *sp,
+                const struct sctp_association *asoc),
+
+       TP_ARGS(sp, asoc),
+
+       TP_STRUCT__entry(
+               __field(__u64, asoc)
+               __field(__u32, primary)
+               __array(__u8, ipaddr, sizeof(union sctp_addr))
+               __field(__u32, state)
+               __field(__u32, cwnd)
+               __field(__u32, ssthresh)
+               __field(__u32, flight_size)
+               __field(__u32, partial_bytes_acked)
+               __field(__u32, pathmtu)
+       ),
+
+       TP_fast_assign(
+               __entry->asoc = (unsigned long)asoc;
+               __entry->primary = (sp == asoc->peer.primary_path);
+               memcpy(__entry->ipaddr, &sp->ipaddr, sizeof(union sctp_addr));
+               __entry->state = sp->state;
+               __entry->cwnd = sp->cwnd;
+               __entry->ssthresh = sp->ssthresh;
+               __entry->flight_size = sp->flight_size;
+               __entry->partial_bytes_acked = sp->partial_bytes_acked;
+               __entry->pathmtu = sp->pathmtu;
+       ),
+
+       TP_printk("asoc=%#llx%s ipaddr=%pISpc state=%u cwnd=%u ssthresh=%u "
+                 "flight_size=%u partial_bytes_acked=%u pathmtu=%u",
+                 __entry->asoc, __entry->primary ? "(*)" : "",
+                 __entry->ipaddr, __entry->state, __entry->cwnd,
+                 __entry->ssthresh, __entry->flight_size,
+                 __entry->partial_bytes_acked, __entry->pathmtu)
+);
+
+TRACE_EVENT(sctp_probe,
+
+       TP_PROTO(const struct sctp_endpoint *ep,
+                const struct sctp_association *asoc,
+                struct sctp_chunk *chunk),
+
+       TP_ARGS(ep, asoc, chunk),
+
+       TP_STRUCT__entry(
+               __field(__u64, asoc)
+               __field(__u32, mark)
+               __field(__u16, bind_port)
+               __field(__u16, peer_port)
+               __field(__u32, pathmtu)
+               __field(__u32, rwnd)
+               __field(__u16, unack_data)
+       ),
+
+       TP_fast_assign(
+               struct sk_buff *skb = chunk->skb;
+
+               __entry->asoc = (unsigned long)asoc;
+               __entry->mark = skb->mark;
+               __entry->bind_port = ep->base.bind_addr.port;
+               __entry->peer_port = asoc->peer.port;
+               __entry->pathmtu = asoc->pathmtu;
+               __entry->rwnd = asoc->peer.rwnd;
+               __entry->unack_data = asoc->unack_data;
+
+               if (trace_sctp_probe_path_enabled()) {
+                       struct sctp_transport *sp;
+
+                       list_for_each_entry(sp, &asoc->peer.transport_addr_list,
+                                           transports) {
+                               trace_sctp_probe_path(sp, asoc);
+                       }
+               }
+       ),
+
+       TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
+                 "rwnd=%u unack_data=%d",
+                 __entry->asoc, __entry->mark, __entry->bind_port,
+                 __entry->peer_port, __entry->pathmtu, __entry->rwnd,
+                 __entry->unack_data)
+);
+
+#endif /* _TRACE_SCTP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index ec4dade..3537c5f 100644 (file)
@@ -6,7 +6,50 @@
 #define _TRACE_SOCK_H
 
 #include <net/sock.h>
+#include <net/ipv6.h>
 #include <linux/tracepoint.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+/* The protocol traced by sock_set_state */
+#define inet_protocol_names            \
+               EM(IPPROTO_TCP)                 \
+               EM(IPPROTO_DCCP)                \
+               EMe(IPPROTO_SCTP)
+
+#define tcp_state_names                        \
+               EM(TCP_ESTABLISHED)             \
+               EM(TCP_SYN_SENT)                \
+               EM(TCP_SYN_RECV)                \
+               EM(TCP_FIN_WAIT1)               \
+               EM(TCP_FIN_WAIT2)               \
+               EM(TCP_TIME_WAIT)               \
+               EM(TCP_CLOSE)                   \
+               EM(TCP_CLOSE_WAIT)              \
+               EM(TCP_LAST_ACK)                \
+               EM(TCP_LISTEN)                  \
+               EM(TCP_CLOSING)                 \
+               EMe(TCP_NEW_SYN_RECV)
+
+/* enums need to be exported to user space */
+#undef EM
+#undef EMe
+#define EM(a)       TRACE_DEFINE_ENUM(a);
+#define EMe(a)      TRACE_DEFINE_ENUM(a);
+
+inet_protocol_names
+tcp_state_names
+
+#undef EM
+#undef EMe
+#define EM(a)       { a, #a },
+#define EMe(a)      { a, #a }
+
+#define show_inet_protocol_name(val)    \
+       __print_symbolic(val, inet_protocol_names)
+
+#define show_tcp_state_name(val)        \
+       __print_symbolic(val, tcp_state_names)
 
 TRACE_EVENT(sock_rcvqueue_full,
 
@@ -63,6 +106,69 @@ TRACE_EVENT(sock_exceed_buf_limit,
                __entry->rmem_alloc)
 );
 
+TRACE_EVENT(inet_sock_set_state,
+
+       TP_PROTO(const struct sock *sk, const int oldstate, const int newstate),
+
+       TP_ARGS(sk, oldstate, newstate),
+
+       TP_STRUCT__entry(
+               __field(const void *, skaddr)
+               __field(int, oldstate)
+               __field(int, newstate)
+               __field(__u16, sport)
+               __field(__u16, dport)
+               __field(__u8, protocol)
+               __array(__u8, saddr, 4)
+               __array(__u8, daddr, 4)
+               __array(__u8, saddr_v6, 16)
+               __array(__u8, daddr_v6, 16)
+       ),
+
+       TP_fast_assign(
+               struct inet_sock *inet = inet_sk(sk);
+               struct in6_addr *pin6;
+               __be32 *p32;
+
+               __entry->skaddr = sk;
+               __entry->oldstate = oldstate;
+               __entry->newstate = newstate;
+
+               __entry->protocol = sk->sk_protocol;
+               __entry->sport = ntohs(inet->inet_sport);
+               __entry->dport = ntohs(inet->inet_dport);
+
+               p32 = (__be32 *) __entry->saddr;
+               *p32 = inet->inet_saddr;
+
+               p32 = (__be32 *) __entry->daddr;
+               *p32 =  inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+               if (sk->sk_family == AF_INET6) {
+                       pin6 = (struct in6_addr *)__entry->saddr_v6;
+                       *pin6 = sk->sk_v6_rcv_saddr;
+                       pin6 = (struct in6_addr *)__entry->daddr_v6;
+                       *pin6 = sk->sk_v6_daddr;
+               } else
+#endif
+               {
+                       pin6 = (struct in6_addr *)__entry->saddr_v6;
+                       ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+                       pin6 = (struct in6_addr *)__entry->daddr_v6;
+                       ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+               }
+       ),
+
+       TP_printk("protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
+                       show_inet_protocol_name(__entry->protocol),
+                       __entry->sport, __entry->dport,
+                       __entry->saddr, __entry->daddr,
+                       __entry->saddr_v6, __entry->daddr_v6,
+                       show_tcp_state_name(__entry->oldstate),
+                       show_tcp_state_name(__entry->newstate))
+);
+
 #endif /* _TRACE_SOCK_H */
 
 /* This part must be outside protection */
index 07cccca..878b2be 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM tcp
 
@@ -8,22 +9,36 @@
 #include <linux/tcp.h>
 #include <linux/tracepoint.h>
 #include <net/ipv6.h>
+#include <net/tcp.h>
+
+#define TP_STORE_V4MAPPED(__entry, saddr, daddr)               \
+       do {                                                    \
+               struct in6_addr *pin6;                          \
+                                                               \
+               pin6 = (struct in6_addr *)__entry->saddr_v6;    \
+               ipv6_addr_set_v4mapped(saddr, pin6);            \
+               pin6 = (struct in6_addr *)__entry->daddr_v6;    \
+               ipv6_addr_set_v4mapped(daddr, pin6);            \
+       } while (0)
 
-#define tcp_state_name(state)  { state, #state }
-#define show_tcp_state_name(val)                       \
-       __print_symbolic(val,                           \
-               tcp_state_name(TCP_ESTABLISHED),        \
-               tcp_state_name(TCP_SYN_SENT),           \
-               tcp_state_name(TCP_SYN_RECV),           \
-               tcp_state_name(TCP_FIN_WAIT1),          \
-               tcp_state_name(TCP_FIN_WAIT2),          \
-               tcp_state_name(TCP_TIME_WAIT),          \
-               tcp_state_name(TCP_CLOSE),              \
-               tcp_state_name(TCP_CLOSE_WAIT),         \
-               tcp_state_name(TCP_LAST_ACK),           \
-               tcp_state_name(TCP_LISTEN),             \
-               tcp_state_name(TCP_CLOSING),            \
-               tcp_state_name(TCP_NEW_SYN_RECV))
+#if IS_ENABLED(CONFIG_IPV6)
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6)          \
+       do {                                                            \
+               if (sk->sk_family == AF_INET6) {                        \
+                       struct in6_addr *pin6;                          \
+                                                                       \
+                       pin6 = (struct in6_addr *)__entry->saddr_v6;    \
+                       *pin6 = saddr6;                                 \
+                       pin6 = (struct in6_addr *)__entry->daddr_v6;    \
+                       *pin6 = daddr6;                                 \
+               } else {                                                \
+                       TP_STORE_V4MAPPED(__entry, saddr, daddr);       \
+               }                                                       \
+       } while (0)
+#else
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6)  \
+       TP_STORE_V4MAPPED(__entry, saddr, daddr)
+#endif
 
 /*
  * tcp event with arguments sk and skb
@@ -50,7 +65,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
 
        TP_fast_assign(
                struct inet_sock *inet = inet_sk(sk);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skbaddr = skb;
@@ -65,20 +79,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
                p32 = (__be32 *) __entry->daddr;
                *p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = sk->sk_v6_rcv_saddr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = sk->sk_v6_daddr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+                             sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -127,7 +129,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
 
        TP_fast_assign(
                struct inet_sock *inet = inet_sk(sk);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skaddr = sk;
@@ -141,20 +142,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
                p32 = (__be32 *) __entry->daddr;
                *p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = sk->sk_v6_rcv_saddr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = sk->sk_v6_daddr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+                              sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -197,7 +186,6 @@ TRACE_EVENT(tcp_set_state,
 
        TP_fast_assign(
                struct inet_sock *inet = inet_sk(sk);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skaddr = sk;
@@ -213,20 +201,8 @@ TRACE_EVENT(tcp_set_state,
                p32 = (__be32 *) __entry->daddr;
                *p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = sk->sk_v6_rcv_saddr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = sk->sk_v6_daddr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+                              sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
@@ -256,7 +232,6 @@ TRACE_EVENT(tcp_retransmit_synack,
 
        TP_fast_assign(
                struct inet_request_sock *ireq = inet_rsk(req);
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->skaddr = sk;
@@ -271,20 +246,8 @@ TRACE_EVENT(tcp_retransmit_synack,
                p32 = (__be32 *) __entry->daddr;
                *p32 = ireq->ir_rmt_addr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == AF_INET6) {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       *pin6 = ireq->ir_v6_loc_addr;
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       *pin6 = ireq->ir_v6_rmt_addr;
-               } else
-#endif
-               {
-                       pin6 = (struct in6_addr *)__entry->saddr_v6;
-                       ipv6_addr_set_v4mapped(ireq->ir_loc_addr, pin6);
-                       pin6 = (struct in6_addr *)__entry->daddr_v6;
-                       ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, pin6);
-               }
+               TP_STORE_ADDRS(__entry, ireq->ir_loc_addr, ireq->ir_rmt_addr,
+                             ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr);
        ),
 
        TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -293,6 +256,64 @@ TRACE_EVENT(tcp_retransmit_synack,
                  __entry->saddr_v6, __entry->daddr_v6)
 );
 
+#include <trace/events/net_probe_common.h>
+
+TRACE_EVENT(tcp_probe,
+
+       TP_PROTO(struct sock *sk, struct sk_buff *skb),
+
+       TP_ARGS(sk, skb),
+
+       TP_STRUCT__entry(
+               /* sockaddr_in6 is always bigger than sockaddr_in */
+               __array(__u8, saddr, sizeof(struct sockaddr_in6))
+               __array(__u8, daddr, sizeof(struct sockaddr_in6))
+               __field(__u16, sport)
+               __field(__u16, dport)
+               __field(__u32, mark)
+               __field(__u16, length)
+               __field(__u32, snd_nxt)
+               __field(__u32, snd_una)
+               __field(__u32, snd_cwnd)
+               __field(__u32, ssthresh)
+               __field(__u32, snd_wnd)
+               __field(__u32, srtt)
+               __field(__u32, rcv_wnd)
+       ),
+
+       TP_fast_assign(
+               const struct tcp_sock *tp = tcp_sk(sk);
+               const struct inet_sock *inet = inet_sk(sk);
+
+               memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+               memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+               TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+               /* For filtering use */
+               __entry->sport = ntohs(inet->inet_sport);
+               __entry->dport = ntohs(inet->inet_dport);
+               __entry->mark = skb->mark;
+
+               __entry->length = skb->len;
+               __entry->snd_nxt = tp->snd_nxt;
+               __entry->snd_una = tp->snd_una;
+               __entry->snd_cwnd = tp->snd_cwnd;
+               __entry->snd_wnd = tp->snd_wnd;
+               __entry->rcv_wnd = tp->rcv_wnd;
+               __entry->ssthresh = tcp_current_ssthresh(sk);
+               __entry->srtt = tp->srtt_us >> 3;
+       ),
+
+       TP_printk("src=%pISpc dest=%pISpc mark=%#x length=%d snd_nxt=%#x "
+                 "snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u "
+                 "rcv_wnd=%u",
+                 __entry->saddr, __entry->daddr, __entry->mark,
+                 __entry->length, __entry->snd_nxt, __entry->snd_una,
+                 __entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
+                 __entry->srtt, __entry->rcv_wnd)
+);
+
 #endif /* _TRACE_TCP_H */
 
 /* This part must be outside protection */
similarity index 80%
rename from net/batman-adv/packet.h
rename to include/uapi/linux/batadv_packet.h
index 8e8a5db..5cb360b 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef _NET_BATMAN_ADV_PACKET_H_
-#define _NET_BATMAN_ADV_PACKET_H_
+#ifndef _UAPI_LINUX_BATADV_PACKET_H_
+#define _UAPI_LINUX_BATADV_PACKET_H_
 
 #include <asm/byteorder.h>
+#include <linux/if_ether.h>
 #include <linux/types.h>
 
-#define batadv_tp_is_error(n) ((u8)(n) > 127 ? 1 : 0)
+/**
+ * batadv_tp_is_error() - Check throughput meter return code for error
+ * @n: throughput meter return code
+ *
+ * Return: 0 when not error was detected, != 0 otherwise
+ */
+#define batadv_tp_is_error(n) ((__u8)(n) > 127 ? 1 : 0)
 
 /**
  * enum batadv_packettype - types for batman-adv encapsulated packets
@@ -83,12 +91,20 @@ enum batadv_subtype {
  *     one hop neighbor on the interface where it was originally received.
  */
 enum batadv_iv_flags {
-       BATADV_NOT_BEST_NEXT_HOP   = BIT(0),
-       BATADV_PRIMARIES_FIRST_HOP = BIT(1),
-       BATADV_DIRECTLINK          = BIT(2),
+       BATADV_NOT_BEST_NEXT_HOP   = 1UL << 0,
+       BATADV_PRIMARIES_FIRST_HOP = 1UL << 1,
+       BATADV_DIRECTLINK          = 1UL << 2,
 };
 
-/* ICMP message types */
+/**
+ * enum batadv_icmp_packettype - ICMP message types
+ * @BATADV_ECHO_REPLY: success reply to BATADV_ECHO_REQUEST
+ * @BATADV_DESTINATION_UNREACHABLE: failure when route to destination not found
+ * @BATADV_ECHO_REQUEST: request BATADV_ECHO_REPLY from destination
+ * @BATADV_TTL_EXCEEDED: error after BATADV_ECHO_REQUEST traversed too many hops
+ * @BATADV_PARAMETER_PROBLEM: return code for malformed messages
+ * @BATADV_TP: throughput meter packet
+ */
 enum batadv_icmp_packettype {
        BATADV_ECHO_REPLY              = 0,
        BATADV_DESTINATION_UNREACHABLE = 3,
@@ -106,9 +122,9 @@ enum batadv_icmp_packettype {
  * @BATADV_MCAST_WANT_ALL_IPV6: we want all IPv6 multicast packets
  */
 enum batadv_mcast_flags {
-       BATADV_MCAST_WANT_ALL_UNSNOOPABLES      = BIT(0),
-       BATADV_MCAST_WANT_ALL_IPV4              = BIT(1),
-       BATADV_MCAST_WANT_ALL_IPV6              = BIT(2),
+       BATADV_MCAST_WANT_ALL_UNSNOOPABLES      = 1UL << 0,
+       BATADV_MCAST_WANT_ALL_IPV4              = 1UL << 1,
+       BATADV_MCAST_WANT_ALL_IPV6              = 1UL << 2,
 };
 
 /* tt data subtypes */
@@ -122,10 +138,10 @@ enum batadv_mcast_flags {
  * @BATADV_TT_FULL_TABLE: contains full table to replace existing table
  */
 enum batadv_tt_data_flags {
-       BATADV_TT_OGM_DIFF   = BIT(0),
-       BATADV_TT_REQUEST    = BIT(1),
-       BATADV_TT_RESPONSE   = BIT(2),
-       BATADV_TT_FULL_TABLE = BIT(4),
+       BATADV_TT_OGM_DIFF   = 1UL << 0,
+       BATADV_TT_REQUEST    = 1UL << 1,
+       BATADV_TT_RESPONSE   = 1UL << 2,
+       BATADV_TT_FULL_TABLE = 1UL << 4,
 };
 
 /**
@@ -133,10 +149,17 @@ enum batadv_tt_data_flags {
  * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
  */
 enum batadv_vlan_flags {
-       BATADV_VLAN_HAS_TAG     = BIT(15),
+       BATADV_VLAN_HAS_TAG     = 1UL << 15,
 };
 
-/* claim frame types for the bridge loop avoidance */
+/**
+ * enum batadv_bla_claimframe - claim frame types for the bridge loop avoidance
+ * @BATADV_CLAIM_TYPE_CLAIM: claim of a client mac address
+ * @BATADV_CLAIM_TYPE_UNCLAIM: unclaim of a client mac address
+ * @BATADV_CLAIM_TYPE_ANNOUNCE: announcement of backbone with current crc
+ * @BATADV_CLAIM_TYPE_REQUEST: request of full claim table
+ * @BATADV_CLAIM_TYPE_LOOPDETECT: mesh-traversing loop detect packet
+ */
 enum batadv_bla_claimframe {
        BATADV_CLAIM_TYPE_CLAIM         = 0x00,
        BATADV_CLAIM_TYPE_UNCLAIM       = 0x01,
@@ -168,8 +191,8 @@ enum batadv_tvlv_type {
  * transport the claim type and the group id
  */
 struct batadv_bla_claim_dst {
-       u8     magic[3];        /* FF:43:05 */
-       u8     type;            /* bla_claimframe */
+       __u8   magic[3];        /* FF:43:05 */
+       __u8   type;            /* bla_claimframe */
        __be16 group;           /* group id */
 };
 
@@ -189,15 +212,15 @@ struct batadv_bla_claim_dst {
  * @tvlv_len: length of tvlv data following the ogm header
  */
 struct batadv_ogm_packet {
-       u8     packet_type;
-       u8     version;
-       u8     ttl;
-       u8     flags;
+       __u8   packet_type;
+       __u8   version;
+       __u8   ttl;
+       __u8   flags;
        __be32 seqno;
-       u8     orig[ETH_ALEN];
-       u8     prev_sender[ETH_ALEN];
-       u8     reserved;
-       u8     tq;
+       __u8   orig[ETH_ALEN];
+       __u8   prev_sender[ETH_ALEN];
+       __u8   reserved;
+       __u8   tq;
        __be16 tvlv_len;
        /* __packed is not needed as the struct size is divisible by 4,
         * and the largest data type in this struct has a size of 4.
@@ -218,12 +241,12 @@ struct batadv_ogm_packet {
  * @throughput: the currently flooded path throughput
  */
 struct batadv_ogm2_packet {
-       u8     packet_type;
-       u8     version;
-       u8     ttl;
-       u8     flags;
+       __u8   packet_type;
+       __u8   version;
+       __u8   ttl;
+       __u8   flags;
        __be32 seqno;
-       u8     orig[ETH_ALEN];
+       __u8   orig[ETH_ALEN];
        __be16 tvlv_len;
        __be32 throughput;
        /* __packed is not needed as the struct size is divisible by 4,
@@ -242,9 +265,9 @@ struct batadv_ogm2_packet {
  * @elp_interval: currently used ELP sending interval in ms
  */
 struct batadv_elp_packet {
-       u8     packet_type;
-       u8     version;
-       u8     orig[ETH_ALEN];
+       __u8   packet_type;
+       __u8   version;
+       __u8   orig[ETH_ALEN];
        __be32 seqno;
        __be32 elp_interval;
 };
@@ -267,14 +290,14 @@ struct batadv_elp_packet {
  * members are padded the same way as they are in real packets.
  */
 struct batadv_icmp_header {
-       u8 packet_type;
-       u8 version;
-       u8 ttl;
-       u8 msg_type; /* see ICMP message types above */
-       u8 dst[ETH_ALEN];
-       u8 orig[ETH_ALEN];
-       u8 uid;
-       u8 align[3];
+       __u8 packet_type;
+       __u8 version;
+       __u8 ttl;
+       __u8 msg_type; /* see ICMP message types above */
+       __u8 dst[ETH_ALEN];
+       __u8 orig[ETH_ALEN];
+       __u8 uid;
+       __u8 align[3];
 };
 
 /**
@@ -290,14 +313,14 @@ struct batadv_icmp_header {
  * @seqno: ICMP sequence number
  */
 struct batadv_icmp_packet {
-       u8     packet_type;
-       u8     version;
-       u8     ttl;
-       u8     msg_type; /* see ICMP message types above */
-       u8     dst[ETH_ALEN];
-       u8     orig[ETH_ALEN];
-       u8     uid;
-       u8     reserved;
+       __u8   packet_type;
+       __u8   version;
+       __u8   ttl;
+       __u8   msg_type; /* see ICMP message types above */
+       __u8   dst[ETH_ALEN];
+       __u8   orig[ETH_ALEN];
+       __u8   uid;
+       __u8   reserved;
        __be16 seqno;
 };
 
@@ -319,15 +342,15 @@ struct batadv_icmp_packet {
  *  store it using network order
  */
 struct batadv_icmp_tp_packet {
-       u8  packet_type;
-       u8  version;
-       u8  ttl;
-       u8  msg_type; /* see ICMP message types above */
-       u8  dst[ETH_ALEN];
-       u8  orig[ETH_ALEN];
-       u8  uid;
-       u8  subtype;
-       u8  session[2];
+       __u8   packet_type;
+       __u8   version;
+       __u8   ttl;
+       __u8   msg_type; /* see ICMP message types above */
+       __u8   dst[ETH_ALEN];
+       __u8   orig[ETH_ALEN];
+       __u8   uid;
+       __u8   subtype;
+       __u8   session[2];
        __be32 seqno;
        __be32 timestamp;
 };
@@ -358,16 +381,16 @@ enum batadv_icmp_tp_subtype {
  * @rr: route record array
  */
 struct batadv_icmp_packet_rr {
-       u8     packet_type;
-       u8     version;
-       u8     ttl;
-       u8     msg_type; /* see ICMP message types above */
-       u8     dst[ETH_ALEN];
-       u8     orig[ETH_ALEN];
-       u8     uid;
-       u8     rr_cur;
+       __u8   packet_type;
+       __u8   version;
+       __u8   ttl;
+       __u8   msg_type; /* see ICMP message types above */
+       __u8   dst[ETH_ALEN];
+       __u8   orig[ETH_ALEN];
+       __u8   uid;
+       __u8   rr_cur;
        __be16 seqno;
-       u8     rr[BATADV_RR_LEN][ETH_ALEN];
+       __u8   rr[BATADV_RR_LEN][ETH_ALEN];
 };
 
 #define BATADV_ICMP_MAX_PACKET_SIZE    sizeof(struct batadv_icmp_packet_rr)
@@ -393,11 +416,11 @@ struct batadv_icmp_packet_rr {
  * @dest: originator destination of the unicast packet
  */
 struct batadv_unicast_packet {
-       u8 packet_type;
-       u8 version;
-       u8 ttl;
-       u8 ttvn; /* destination translation table version number */
-       u8 dest[ETH_ALEN];
+       __u8 packet_type;
+       __u8 version;
+       __u8 ttl;
+       __u8 ttvn; /* destination translation table version number */
+       __u8 dest[ETH_ALEN];
        /* "4 bytes boundary + 2 bytes" long to make the payload after the
         * following ethernet header again 4 bytes boundary aligned
         */
@@ -412,9 +435,9 @@ struct batadv_unicast_packet {
  */
 struct batadv_unicast_4addr_packet {
        struct batadv_unicast_packet u;
-       u8 src[ETH_ALEN];
-       u8 subtype;
-       u8 reserved;
+       __u8 src[ETH_ALEN];
+       __u8 subtype;
+       __u8 reserved;
        /* "4 bytes boundary + 2 bytes" long to make the payload after the
         * following ethernet header again 4 bytes boundary aligned
         */
@@ -434,22 +457,22 @@ struct batadv_unicast_4addr_packet {
  * @total_size: size of the merged packet
  */
 struct batadv_frag_packet {
-       u8     packet_type;
-       u8     version;  /* batman version field */
-       u8     ttl;
+       __u8   packet_type;
+       __u8   version;  /* batman version field */
+       __u8   ttl;
 #if defined(__BIG_ENDIAN_BITFIELD)
-       u8     no:4;
-       u8     priority:3;
-       u8     reserved:1;
+       __u8   no:4;
+       __u8   priority:3;
+       __u8   reserved:1;
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
-       u8     reserved:1;
-       u8     priority:3;
-       u8     no:4;
+       __u8   reserved:1;
+       __u8   priority:3;
+       __u8   no:4;
 #else
 #error "unknown bitfield endianness"
 #endif
-       u8     dest[ETH_ALEN];
-       u8     orig[ETH_ALEN];
+       __u8   dest[ETH_ALEN];
+       __u8   orig[ETH_ALEN];
        __be16 seqno;
        __be16 total_size;
 };
@@ -464,12 +487,12 @@ struct batadv_frag_packet {
  * @orig: originator of the broadcast packet
  */
 struct batadv_bcast_packet {
-       u8     packet_type;
-       u8     version;  /* batman version field */
-       u8     ttl;
-       u8     reserved;
+       __u8   packet_type;
+       __u8   version;  /* batman version field */
+       __u8   ttl;
+       __u8   reserved;
        __be32 seqno;
-       u8     orig[ETH_ALEN];
+       __u8   orig[ETH_ALEN];
        /* "4 bytes boundary + 2 bytes" long to make the payload after the
         * following ethernet header again 4 bytes boundary aligned
         */
@@ -493,19 +516,19 @@ struct batadv_bcast_packet {
  * @coded_len: length of network coded part of the payload
  */
 struct batadv_coded_packet {
-       u8     packet_type;
-       u8     version;  /* batman version field */
-       u8     ttl;
-       u8     first_ttvn;
-       /* u8  first_dest[ETH_ALEN]; - saved in mac header destination */
-       u8     first_source[ETH_ALEN];
-       u8     first_orig_dest[ETH_ALEN];
+       __u8   packet_type;
+       __u8   version;  /* batman version field */
+       __u8   ttl;
+       __u8   first_ttvn;
+       /* __u8 first_dest[ETH_ALEN]; - saved in mac header destination */
+       __u8   first_source[ETH_ALEN];
+       __u8   first_orig_dest[ETH_ALEN];
        __be32 first_crc;
-       u8     second_ttl;
-       u8     second_ttvn;
-       u8     second_dest[ETH_ALEN];
-       u8     second_source[ETH_ALEN];
-       u8     second_orig_dest[ETH_ALEN];
+       __u8   second_ttl;
+       __u8   second_ttvn;
+       __u8   second_dest[ETH_ALEN];
+       __u8   second_source[ETH_ALEN];
+       __u8   second_orig_dest[ETH_ALEN];
        __be32 second_crc;
        __be16 coded_len;
 };
@@ -524,14 +547,14 @@ struct batadv_coded_packet {
  * @align: 2 bytes to align the header to a 4 byte boundary
  */
 struct batadv_unicast_tvlv_packet {
-       u8     packet_type;
-       u8     version;  /* batman version field */
-       u8     ttl;
-       u8     reserved;
-       u8     dst[ETH_ALEN];
-       u8     src[ETH_ALEN];
+       __u8   packet_type;
+       __u8   version;  /* batman version field */
+       __u8   ttl;
+       __u8   reserved;
+       __u8   dst[ETH_ALEN];
+       __u8   src[ETH_ALEN];
        __be16 tvlv_len;
-       u16    align;
+       __u16  align;
 };
 
 /**
@@ -541,8 +564,8 @@ struct batadv_unicast_tvlv_packet {
  * @len: tvlv container length
  */
 struct batadv_tvlv_hdr {
-       u8     type;
-       u8     version;
+       __u8   type;
+       __u8   version;
        __be16 len;
 };
 
@@ -565,8 +588,8 @@ struct batadv_tvlv_gateway_data {
  *  one batadv_tvlv_tt_vlan_data object per announced vlan
  */
 struct batadv_tvlv_tt_data {
-       u8     flags;
-       u8     ttvn;
+       __u8   flags;
+       __u8   ttvn;
        __be16 num_vlan;
 };
 
@@ -580,7 +603,7 @@ struct batadv_tvlv_tt_data {
 struct batadv_tvlv_tt_vlan_data {
        __be32 crc;
        __be16 vid;
-       u16    reserved;
+       __u16  reserved;
 };
 
 /**
@@ -592,9 +615,9 @@ struct batadv_tvlv_tt_vlan_data {
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_tt_change {
-       u8     flags;
-       u8     reserved[3];
-       u8     addr[ETH_ALEN];
+       __u8   flags;
+       __u8   reserved[3];
+       __u8   addr[ETH_ALEN];
        __be16 vid;
 };
 
@@ -604,7 +627,7 @@ struct batadv_tvlv_tt_change {
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_roam_adv {
-       u8     client[ETH_ALEN];
+       __u8   client[ETH_ALEN];
        __be16 vid;
 };
 
@@ -614,8 +637,8 @@ struct batadv_tvlv_roam_adv {
  * @reserved: reserved field
  */
 struct batadv_tvlv_mcast_data {
-       u8 flags;
-       u8 reserved[3];
+       __u8 flags;
+       __u8 reserved[3];
 };
 
-#endif /* _NET_BATMAN_ADV_PACKET_H_ */
+#endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
index efd641c..ae00c99 100644 (file)
@@ -1,18 +1,25 @@
+/* SPDX-License-Identifier: MIT */
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
  *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  */
 
 #ifndef _UAPI_LINUX_BATMAN_ADV_H_
index d01f1cb..405317f 100644 (file)
@@ -899,6 +899,9 @@ struct xdp_md {
        __u32 data;
        __u32 data_end;
        __u32 data_meta;
+       /* Below access go though struct xdp_rxq_info */
+       __u32 ingress_ifindex; /* rxq->dev->ifindex */
+       __u32 rx_queue_index;  /* rxq->queue_index  */
 };
 
 enum sk_action {
@@ -921,6 +924,9 @@ struct bpf_prog_info {
        __u32 nr_map_ids;
        __aligned_u64 map_ids;
        char name[BPF_OBJ_NAME_LEN];
+       __u32 ifindex;
+       __u64 netns_dev;
+       __u64 netns_ino;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
@@ -1012,7 +1018,8 @@ struct bpf_perf_event_value {
 #define BPF_DEVCG_DEV_CHAR     (1ULL << 1)
 
 struct bpf_cgroup_dev_ctx {
-       __u32 access_type; /* (access << 16) | type */
+       /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+       __u32 access_type;
        __u32 major;
        __u32 minor;
 };
index 817d807..14565d7 100644 (file)
@@ -92,6 +92,8 @@ enum {
        INET_DIAG_BC_D_COND,
        INET_DIAG_BC_DEV_COND,   /* u32 ifindex */
        INET_DIAG_BC_MARK_COND,
+       INET_DIAG_BC_S_EQ,
+       INET_DIAG_BC_D_EQ,
 };
 
 struct inet_diag_hostcond {
index d84ce5c..f78eef4 100644 (file)
@@ -94,7 +94,7 @@ enum {
        L2TP_ATTR_NONE,                 /* no data */
        L2TP_ATTR_PW_TYPE,              /* u16, enum l2tp_pwtype */
        L2TP_ATTR_ENCAP_TYPE,           /* u16, enum l2tp_encap_type */
-       L2TP_ATTR_OFFSET,               /* u16 */
+       L2TP_ATTR_OFFSET,               /* u16 (not used) */
        L2TP_ATTR_DATA_SEQ,             /* u16 */
        L2TP_ATTR_L2SPEC_TYPE,          /* u8, enum l2tp_l2spec_type */
        L2TP_ATTR_L2SPEC_LEN,           /* u8, enum l2tp_l2spec_type */
index f882fe1..c587a61 100644 (file)
@@ -3862,6 +3862,9 @@ enum nl80211_bss_scan_width {
  *     @NL80211_BSS_PARENT_BSSID. (u64).
  * @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF
  *     is set.
+ * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update.
+ *     Contains a nested array of signal strength attributes (u8, dBm),
+ *     using the nesting index as the antenna number.
  * @__NL80211_BSS_AFTER_LAST: internal
  * @NL80211_BSS_MAX: highest BSS attribute
  */
@@ -3885,6 +3888,7 @@ enum nl80211_bss {
        NL80211_BSS_PAD,
        NL80211_BSS_PARENT_TSF,
        NL80211_BSS_PARENT_BSSID,
+       NL80211_BSS_CHAIN_SIGNAL,
 
        /* keep last */
        __NL80211_BSS_AFTER_LAST,
index 4914b93..61f410f 100644 (file)
@@ -44,3 +44,8 @@ static inline void xen_balloon_init(void)
 {
 }
 #endif
+
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+struct resource;
+void arch_xen_balloon_init(struct resource *hostmem_resource);
+#endif
index e96e3a1..7b606fc 100644 (file)
@@ -504,6 +504,8 @@ static void __init mm_init(void)
        pgtable_init();
        vmalloc_init();
        ioremap_huge_init();
+       /* Should be run before the first non-init thread is created */
+       init_espfix_bsp();
 }
 
 asmlinkage __visible void __init start_kernel(void)
@@ -679,10 +681,6 @@ asmlinkage __visible void __init start_kernel(void)
        if (efi_enabled(EFI_RUNTIME_SERVICES))
                efi_enter_virtual_mode();
 #endif
-#ifdef CONFIG_X86_ESPFIX64
-       /* Should be run before the first non-init thread is created */
-       init_espfix_bsp();
-#endif
        thread_stack_cache_init();
        cred_init();
        fork_init();
index e691da0..a713fd2 100644 (file)
@@ -9,9 +9,11 @@ obj-$(CONFIG_BPF_SYSCALL) += devmap.o
 obj-$(CONFIG_BPF_SYSCALL) += cpumap.o
 obj-$(CONFIG_BPF_SYSCALL) += offload.o
 ifeq ($(CONFIG_STREAM_PARSER),y)
+ifeq ($(CONFIG_INET),y)
 obj-$(CONFIG_BPF_SYSCALL) += sockmap.o
 endif
 endif
+endif
 ifeq ($(CONFIG_PERF_EVENTS),y)
 obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
 endif
index b789ab7..c1c0b60 100644 (file)
@@ -568,6 +568,8 @@ static bool cgroup_dev_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
                                       struct bpf_insn_access_aux *info)
 {
+       const int size_default = sizeof(__u32);
+
        if (type == BPF_WRITE)
                return false;
 
@@ -576,8 +578,17 @@ static bool cgroup_dev_is_valid_access(int off, int size,
        /* The verifier guarantees that size > 0. */
        if (off % size != 0)
                return false;
-       if (size != sizeof(__u32))
-               return false;
+
+       switch (off) {
+       case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
+               bpf_ctx_record_field_size(info, size_default);
+               if (!bpf_ctx_narrow_access_ok(off, size, size_default))
+                       return false;
+               break;
+       default:
+               if (size != size_default)
+                       return false;
+       }
 
        return true;
 }
index 768e0a0..70a5345 100644 (file)
@@ -771,7 +771,9 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 
 /* Base function for offset calculation. Needs to go into .text section,
  * therefore keeping it non-static as well; will also be used by JITs
- * anyway later on, so do not let the compiler omit it.
+ * anyway later on, so do not let the compiler omit it. This also needs
+ * to go into kallsyms for correlation from e.g. bpftool, so naming
+ * must not change.
  */
 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
index 883f88f..8740406 100644 (file)
@@ -21,10 +21,39 @@ static const char * const func_id_str[] = {
 };
 #undef __BPF_FUNC_STR_FN
 
-const char *func_id_name(int id)
+static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
+                                  const struct bpf_insn *insn,
+                                  char *buff, size_t len)
 {
        BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
 
+       if (insn->src_reg != BPF_PSEUDO_CALL &&
+           insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
+           func_id_str[insn->imm])
+               return func_id_str[insn->imm];
+
+       if (cbs && cbs->cb_call)
+               return cbs->cb_call(cbs->private_data, insn);
+
+       if (insn->src_reg == BPF_PSEUDO_CALL)
+               snprintf(buff, len, "%+d", insn->imm);
+
+       return buff;
+}
+
+static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
+                                  const struct bpf_insn *insn,
+                                  u64 full_imm, char *buff, size_t len)
+{
+       if (cbs && cbs->cb_imm)
+               return cbs->cb_imm(cbs->private_data, insn, full_imm);
+
+       snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
+       return buff;
+}
+
+const char *func_id_name(int id)
+{
        if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
                return func_id_str[id];
        else
@@ -83,7 +112,7 @@ static const char *const bpf_jmp_string[16] = {
        [BPF_EXIT >> 4] = "exit",
 };
 
-static void print_bpf_end_insn(bpf_insn_print_cb verbose,
+static void print_bpf_end_insn(bpf_insn_print_t verbose,
                               struct bpf_verifier_env *env,
                               const struct bpf_insn *insn)
 {
@@ -92,9 +121,12 @@ static void print_bpf_end_insn(bpf_insn_print_cb verbose,
                insn->imm, insn->dst_reg);
 }
 
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-                   const struct bpf_insn *insn, bool allow_ptr_leaks)
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+                   struct bpf_verifier_env *env,
+                   const struct bpf_insn *insn,
+                   bool allow_ptr_leaks)
 {
+       const bpf_insn_print_t verbose = cbs->cb_print;
        u8 class = BPF_CLASS(insn->code);
 
        if (class == BPF_ALU || class == BPF_ALU64) {
@@ -175,12 +207,15 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
                         */
                        u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
                        bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+                       char tmp[64];
 
                        if (map_ptr && !allow_ptr_leaks)
                                imm = 0;
 
-                       verbose(env, "(%02x) r%d = 0x%llx\n", insn->code,
-                               insn->dst_reg, (unsigned long long)imm);
+                       verbose(env, "(%02x) r%d = %s\n",
+                               insn->code, insn->dst_reg,
+                               __func_imm_name(cbs, insn, imm,
+                                               tmp, sizeof(tmp)));
                } else {
                        verbose(env, "BUG_ld_%02x\n", insn->code);
                        return;
@@ -189,12 +224,20 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
                u8 opcode = BPF_OP(insn->code);
 
                if (opcode == BPF_CALL) {
-                       if (insn->src_reg == BPF_PSEUDO_CALL)
-                               verbose(env, "(%02x) call pc%+d\n", insn->code,
-                                       insn->imm);
-                       else
+                       char tmp[64];
+
+                       if (insn->src_reg == BPF_PSEUDO_CALL) {
+                               verbose(env, "(%02x) call pc%s\n",
+                                       insn->code,
+                                       __func_get_name(cbs, insn,
+                                                       tmp, sizeof(tmp)));
+                       } else {
+                               strcpy(tmp, "unknown");
                                verbose(env, "(%02x) call %s#%d\n", insn->code,
-                                       func_id_name(insn->imm), insn->imm);
+                                       __func_get_name(cbs, insn,
+                                                       tmp, sizeof(tmp)),
+                                       insn->imm);
+                       }
                } else if (insn->code == (BPF_JMP | BPF_JA)) {
                        verbose(env, "(%02x) goto pc%+d\n",
                                insn->code, insn->off);
index 8de977e..e0857d0 100644 (file)
 #include <linux/bpf.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
+#ifndef __KERNEL__
+#include <stdio.h>
+#include <string.h>
+#endif
+
+struct bpf_verifier_env;
 
 extern const char *const bpf_alu_string[16];
 extern const char *const bpf_class_string[8];
 
 const char *func_id_name(int id);
 
-struct bpf_verifier_env;
-typedef void (*bpf_insn_print_cb)(struct bpf_verifier_env *env,
-                                 const char *, ...);
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-                   const struct bpf_insn *insn, bool allow_ptr_leaks);
+typedef void (*bpf_insn_print_t)(struct bpf_verifier_env *env,
+                                const char *, ...);
+typedef const char *(*bpf_insn_revmap_call_t)(void *private_data,
+                                             const struct bpf_insn *insn);
+typedef const char *(*bpf_insn_print_imm_t)(void *private_data,
+                                           const struct bpf_insn *insn,
+                                           __u64 full_imm);
+
+struct bpf_insn_cbs {
+       bpf_insn_print_t        cb_print;
+       bpf_insn_revmap_call_t  cb_call;
+       bpf_insn_print_imm_t    cb_imm;
+       void                    *private_data;
+};
 
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+                   struct bpf_verifier_env *env,
+                   const struct bpf_insn *insn,
+                   bool allow_ptr_leaks);
 #endif
index 8455b89..040d4e0 100644 (file)
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
 #include <linux/bug.h>
+#include <linux/kdev_t.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
+#include <linux/proc_ns.h>
 #include <linux/rtnetlink.h>
+#include <linux/rwsem.h>
 
-/* protected by RTNL */
+/* Protects bpf_prog_offload_devs and offload members of all progs.
+ * RTNL lock cannot be taken when holding this lock.
+ */
+static DECLARE_RWSEM(bpf_devs_lock);
 static LIST_HEAD(bpf_prog_offload_devs);
 
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 {
-       struct net *net = current->nsproxy->net_ns;
        struct bpf_dev_offload *offload;
 
        if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
@@ -41,32 +46,40 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
                return -ENOMEM;
 
        offload->prog = prog;
-       init_waitqueue_head(&offload->verifier_done);
 
-       rtnl_lock();
-       offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
-       if (!offload->netdev) {
-               rtnl_unlock();
-               kfree(offload);
-               return -EINVAL;
-       }
+       offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
+                                          attr->prog_ifindex);
+       if (!offload->netdev)
+               goto err_free;
 
+       down_write(&bpf_devs_lock);
+       if (offload->netdev->reg_state != NETREG_REGISTERED)
+               goto err_unlock;
        prog->aux->offload = offload;
        list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
-       rtnl_unlock();
+       dev_put(offload->netdev);
+       up_write(&bpf_devs_lock);
 
        return 0;
+err_unlock:
+       up_write(&bpf_devs_lock);
+       dev_put(offload->netdev);
+err_free:
+       kfree(offload);
+       return -EINVAL;
 }
 
 static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
                             struct netdev_bpf *data)
 {
-       struct net_device *netdev = prog->aux->offload->netdev;
+       struct bpf_dev_offload *offload = prog->aux->offload;
+       struct net_device *netdev;
 
        ASSERT_RTNL();
 
-       if (!netdev)
+       if (!offload)
                return -ENODEV;
+       netdev = offload->netdev;
        if (!netdev->netdev_ops->ndo_bpf)
                return -EOPNOTSUPP;
 
@@ -87,62 +100,63 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
        if (err)
                goto exit_unlock;
 
-       env->dev_ops = data.verifier.ops;
-
+       env->prog->aux->offload->dev_ops = data.verifier.ops;
        env->prog->aux->offload->dev_state = true;
-       env->prog->aux->offload->verifier_running = true;
 exit_unlock:
        rtnl_unlock();
        return err;
 }
 
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+                                int insn_idx, int prev_insn_idx)
+{
+       struct bpf_dev_offload *offload;
+       int ret = -ENODEV;
+
+       down_read(&bpf_devs_lock);
+       offload = env->prog->aux->offload;
+       if (offload)
+               ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+       up_read(&bpf_devs_lock);
+
+       return ret;
+}
+
 static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
        struct bpf_dev_offload *offload = prog->aux->offload;
        struct netdev_bpf data = {};
 
-       /* Caution - if netdev is destroyed before the program, this function
-        * will be called twice.
-        */
-
        data.offload.prog = prog;
 
-       if (offload->verifier_running)
-               wait_event(offload->verifier_done, !offload->verifier_running);
-
        if (offload->dev_state)
                WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
 
-       offload->dev_state = false;
+       /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
+       bpf_prog_free_id(prog, true);
+
        list_del_init(&offload->offloads);
-       offload->netdev = NULL;
+       kfree(offload);
+       prog->aux->offload = NULL;
 }
 
 void bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
-       struct bpf_dev_offload *offload = prog->aux->offload;
-
-       offload->verifier_running = false;
-       wake_up(&offload->verifier_done);
-
        rtnl_lock();
-       __bpf_prog_offload_destroy(prog);
+       down_write(&bpf_devs_lock);
+       if (prog->aux->offload)
+               __bpf_prog_offload_destroy(prog);
+       up_write(&bpf_devs_lock);
        rtnl_unlock();
-
-       kfree(offload);
 }
 
 static int bpf_prog_offload_translate(struct bpf_prog *prog)
 {
-       struct bpf_dev_offload *offload = prog->aux->offload;
        struct netdev_bpf data = {};
        int ret;
 
        data.offload.prog = prog;
 
-       offload->verifier_running = false;
-       wake_up(&offload->verifier_done);
-
        rtnl_lock();
        ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
        rtnl_unlock();
@@ -164,6 +178,63 @@ int bpf_prog_offload_compile(struct bpf_prog *prog)
        return bpf_prog_offload_translate(prog);
 }
 
+struct ns_get_path_bpf_prog_args {
+       struct bpf_prog *prog;
+       struct bpf_prog_info *info;
+};
+
+static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
+{
+       struct ns_get_path_bpf_prog_args *args = private_data;
+       struct bpf_prog_aux *aux = args->prog->aux;
+       struct ns_common *ns;
+       struct net *net;
+
+       rtnl_lock();
+       down_read(&bpf_devs_lock);
+
+       if (aux->offload) {
+               args->info->ifindex = aux->offload->netdev->ifindex;
+               net = dev_net(aux->offload->netdev);
+               get_net(net);
+               ns = &net->ns;
+       } else {
+               args->info->ifindex = 0;
+               ns = NULL;
+       }
+
+       up_read(&bpf_devs_lock);
+       rtnl_unlock();
+
+       return ns;
+}
+
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+                              struct bpf_prog *prog)
+{
+       struct ns_get_path_bpf_prog_args args = {
+               .prog   = prog,
+               .info   = info,
+       };
+       struct inode *ns_inode;
+       struct path ns_path;
+       void *res;
+
+       res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
+       if (IS_ERR(res)) {
+               if (!info->ifindex)
+                       return -ENODEV;
+               return PTR_ERR(res);
+       }
+
+       ns_inode = ns_path.dentry->d_inode;
+       info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
+       info->netns_ino = ns_inode->i_ino;
+       path_put(&ns_path);
+
+       return 0;
+}
+
 const struct bpf_prog_ops bpf_offload_prog_ops = {
 };
 
@@ -181,11 +252,13 @@ static int bpf_offload_notification(struct notifier_block *notifier,
                if (netdev->reg_state != NETREG_UNREGISTERING)
                        break;
 
+               down_write(&bpf_devs_lock);
                list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
                                         offloads) {
                        if (offload->netdev == netdev)
                                __bpf_prog_offload_destroy(offload->prog);
                }
+               up_write(&bpf_devs_lock);
                break;
        default:
                break;
index 5ee2e41..3f662ee 100644 (file)
@@ -96,14 +96,6 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
        return rcu_dereference_sk_user_data(sk);
 }
 
-/* compute the linear packet data range [data, data_end) for skb when
- * sk_skb type programs are in use.
- */
-static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
-{
-       TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
-}
-
 enum __sk_action {
        __SK_DROP = 0,
        __SK_PASS,
index a15bc63..6c63c22 100644 (file)
@@ -226,9 +226,33 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
        return 0;
 }
 
-static int stack_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+static int stack_map_get_next_key(struct bpf_map *map, void *key,
+                                 void *next_key)
 {
-       return -EINVAL;
+       struct bpf_stack_map *smap = container_of(map,
+                                                 struct bpf_stack_map, map);
+       u32 id;
+
+       WARN_ON_ONCE(!rcu_read_lock_held());
+
+       if (!key) {
+               id = 0;
+       } else {
+               id = *(u32 *)key;
+               if (id >= smap->n_buckets || !smap->buckets[id])
+                       id = 0;
+               else
+                       id++;
+       }
+
+       while (id < smap->n_buckets && !smap->buckets[id])
+               id++;
+
+       if (id >= smap->n_buckets)
+               return -ENOENT;
+
+       *(u32 *)next_key = id;
+       return 0;
 }
 
 static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
index e2e1c78..ebf0fb2 100644 (file)
@@ -905,9 +905,13 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
        return id > 0 ? 0 : id;
 }
 
-static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
 {
-       /* cBPF to eBPF migrations are currently not in the idr store. */
+       /* cBPF to eBPF migrations are currently not in the idr store.
+        * Offloaded programs are removed from the store when their device
+        * disappears - even if someone grabs an fd to them they are unusable,
+        * simply waiting for refcnt to drop to be freed.
+        */
        if (!prog->aux->id)
                return;
 
@@ -917,6 +921,7 @@ static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
                __acquire(&prog_idr_lock);
 
        idr_remove(&prog_idr, prog->aux->id);
+       prog->aux->id = 0;
 
        if (do_idr_lock)
                spin_unlock_bh(&prog_idr_lock);
@@ -937,10 +942,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
+               int i;
+
                trace_bpf_prog_put_rcu(prog);
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
+
+               for (i = 0; i < prog->aux->func_cnt; i++)
+                       bpf_prog_kallsyms_del(prog->aux->func[i]);
                bpf_prog_kallsyms_del(prog);
+
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
        }
 }
@@ -1151,6 +1162,8 @@ static int bpf_prog_load(union bpf_attr *attr)
        if (!prog)
                return -ENOMEM;
 
+       prog->aux->offload_requested = !!attr->prog_ifindex;
+
        err = security_bpf_prog_alloc(prog->aux);
        if (err)
                goto free_prog_nouncharge;
@@ -1172,7 +1185,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        atomic_set(&prog->aux->refcnt, 1);
        prog->gpl_compatible = is_gpl ? 1 : 0;
 
-       if (attr->prog_ifindex) {
+       if (bpf_prog_is_dev_bound(prog->aux)) {
                err = bpf_prog_offload_init(prog, attr);
                if (err)
                        goto free_prog;
@@ -1552,6 +1565,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
        return fd;
 }
 
+static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+                                             unsigned long addr)
+{
+       int i;
+
+       for (i = 0; i < prog->aux->used_map_cnt; i++)
+               if (prog->aux->used_maps[i] == (void *)addr)
+                       return prog->aux->used_maps[i];
+       return NULL;
+}
+
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+{
+       const struct bpf_map *map;
+       struct bpf_insn *insns;
+       u64 imm;
+       int i;
+
+       insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+                       GFP_USER);
+       if (!insns)
+               return insns;
+
+       for (i = 0; i < prog->len; i++) {
+               if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+                       insns[i].code = BPF_JMP | BPF_CALL;
+                       insns[i].imm = BPF_FUNC_tail_call;
+                       /* fall-through */
+               }
+               if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+                   insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+                       if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+                               insns[i].code = BPF_JMP | BPF_CALL;
+                       if (!bpf_dump_raw_ok())
+                               insns[i].imm = 0;
+                       continue;
+               }
+
+               if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+                       continue;
+
+               imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+               map = bpf_map_from_imm(prog, imm);
+               if (map) {
+                       insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+                       insns[i].imm = map->id;
+                       insns[i + 1].imm = 0;
+                       continue;
+               }
+
+               if (!bpf_dump_raw_ok() &&
+                   imm == (unsigned long)prog->aux) {
+                       insns[i].imm = 0;
+                       insns[i + 1].imm = 0;
+                       continue;
+               }
+       }
+
+       return insns;
+}
+
 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                                   const union bpf_attr *attr,
                                   union bpf_attr __user *uattr)
@@ -1602,21 +1676,43 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        ulen = info.jited_prog_len;
        info.jited_prog_len = prog->jited_len;
        if (info.jited_prog_len && ulen) {
-               uinsns = u64_to_user_ptr(info.jited_prog_insns);
-               ulen = min_t(u32, info.jited_prog_len, ulen);
-               if (copy_to_user(uinsns, prog->bpf_func, ulen))
-                       return -EFAULT;
+               if (bpf_dump_raw_ok()) {
+                       uinsns = u64_to_user_ptr(info.jited_prog_insns);
+                       ulen = min_t(u32, info.jited_prog_len, ulen);
+                       if (copy_to_user(uinsns, prog->bpf_func, ulen))
+                               return -EFAULT;
+               } else {
+                       info.jited_prog_insns = 0;
+               }
        }
 
        ulen = info.xlated_prog_len;
        info.xlated_prog_len = bpf_prog_insn_size(prog);
        if (info.xlated_prog_len && ulen) {
+               struct bpf_insn *insns_sanitized;
+               bool fault;
+
+               if (prog->blinded && !bpf_dump_raw_ok()) {
+                       info.xlated_prog_insns = 0;
+                       goto done;
+               }
+               insns_sanitized = bpf_insn_prepare_dump(prog);
+               if (!insns_sanitized)
+                       return -ENOMEM;
                uinsns = u64_to_user_ptr(info.xlated_prog_insns);
                ulen = min_t(u32, info.xlated_prog_len, ulen);
-               if (copy_to_user(uinsns, prog->insnsi, ulen))
+               fault = copy_to_user(uinsns, insns_sanitized, ulen);
+               kfree(insns_sanitized);
+               if (fault)
                        return -EFAULT;
        }
 
+       if (bpf_prog_is_dev_bound(prog->aux)) {
+               err = bpf_prog_offload_info_fill(&info, prog);
+               if (err)
+                       return err;
+       }
+
 done:
        if (copy_to_user(uinfo, &info, info_len) ||
            put_user(info_len, &uattr->info.info_len))
index 48b2901..a2b2112 100644 (file)
@@ -772,7 +772,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
                        return -EPERM;
                }
                if (bpf_prog_is_dev_bound(env->prog->aux)) {
-                       verbose(env, "funcation calls in offloaded programs are not supported yet\n");
+                       verbose(env, "function calls in offloaded programs are not supported yet\n");
                        return -EINVAL;
                }
                ret = add_subprog(env, i + insn[i].imm + 1);
@@ -823,6 +823,7 @@ next:
        return 0;
 }
 
+static
 struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
                                       const struct bpf_verifier_state *state,
                                       struct bpf_verifier_state *parent,
@@ -867,7 +868,7 @@ bug:
        verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
        verbose(env, "regno %d parent frame %d current frame %d\n",
                regno, parent->curframe, state->curframe);
-       return 0;
+       return NULL;
 }
 
 static int mark_reg_read(struct bpf_verifier_env *env,
@@ -1417,6 +1418,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
                break;
        case PTR_TO_STACK:
                pointer_desc = "stack ";
+               /* The stack spill tracking logic in check_stack_write()
+                * and check_stack_read() relies on stack accesses being
+                * aligned.
+                */
+               strict = true;
                break;
        default:
                break;
@@ -1429,33 +1435,80 @@ static int update_stack_depth(struct bpf_verifier_env *env,
                              const struct bpf_func_state *func,
                              int off)
 {
-       u16 stack = env->subprog_stack_depth[func->subprogno], total = 0;
-       struct bpf_verifier_state *cur = env->cur_state;
-       int i;
+       u16 stack = env->subprog_stack_depth[func->subprogno];
 
        if (stack >= -off)
                return 0;
 
        /* update known max for given subprogram */
        env->subprog_stack_depth[func->subprogno] = -off;
+       return 0;
+}
 
-       /* compute the total for current call chain */
-       for (i = 0; i <= cur->curframe; i++) {
-               u32 depth = env->subprog_stack_depth[cur->frame[i]->subprogno];
-
-               /* round up to 32-bytes, since this is granularity
-                * of interpreter stack sizes
-                */
-               depth = round_up(depth, 32);
-               total += depth;
-       }
+/* starting from main bpf function walk all instructions of the function
+ * and recursively walk all callees that given function can call.
+ * Ignore jump and exit insns.
+ * Since recursion is prevented by check_cfg() this algorithm
+ * only needs a local stack of MAX_CALL_FRAMES to remember callsites
+ */
+static int check_max_stack_depth(struct bpf_verifier_env *env)
+{
+       int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
+       struct bpf_insn *insn = env->prog->insnsi;
+       int insn_cnt = env->prog->len;
+       int ret_insn[MAX_CALL_FRAMES];
+       int ret_prog[MAX_CALL_FRAMES];
 
-       if (total > MAX_BPF_STACK) {
+process_func:
+       /* round up to 32-bytes, since this is granularity
+        * of interpreter stack size
+        */
+       depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+       if (depth > MAX_BPF_STACK) {
                verbose(env, "combined stack size of %d calls is %d. Too large\n",
-                       cur->curframe, total);
+                       frame + 1, depth);
                return -EACCES;
        }
-       return 0;
+continue_func:
+       if (env->subprog_cnt == subprog)
+               subprog_end = insn_cnt;
+       else
+               subprog_end = env->subprog_starts[subprog];
+       for (; i < subprog_end; i++) {
+               if (insn[i].code != (BPF_JMP | BPF_CALL))
+                       continue;
+               if (insn[i].src_reg != BPF_PSEUDO_CALL)
+                       continue;
+               /* remember insn and function to return to */
+               ret_insn[frame] = i + 1;
+               ret_prog[frame] = subprog;
+
+               /* find the callee */
+               i = i + insn[i].imm + 1;
+               subprog = find_subprog(env, i);
+               if (subprog < 0) {
+                       WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+                                 i);
+                       return -EFAULT;
+               }
+               subprog++;
+               frame++;
+               if (frame >= MAX_CALL_FRAMES) {
+                       WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
+                       return -EFAULT;
+               }
+               goto process_func;
+       }
+       /* end of for() loop means the last insn of the 'subprog'
+        * was reached. Doesn't matter whether it was JA or EXIT
+        */
+       if (frame == 0)
+               return 0;
+       depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+       frame--;
+       i = ret_insn[frame];
+       subprog = ret_prog[frame];
+       goto continue_func;
 }
 
 static int get_callee_stack_depth(struct bpf_verifier_env *env,
@@ -1473,6 +1526,29 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
        return env->subprog_stack_depth[subprog];
 }
 
+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+       u64 mask;
+
+       /* clear high bits in bit representation */
+       reg->var_off = tnum_cast(reg->var_off, size);
+
+       /* fix arithmetic bounds */
+       mask = ((u64)1 << (size * 8)) - 1;
+       if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+               reg->umin_value &= mask;
+               reg->umax_value &= mask;
+       } else {
+               reg->umin_value = 0;
+               reg->umax_value = mask;
+       }
+       reg->smin_value = reg->umin_value;
+       reg->smax_value = reg->umax_value;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -1608,9 +1684,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
        if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
            regs[value_regno].type == SCALAR_VALUE) {
                /* b/h/w load zero-extends, mark upper bits as known 0 */
-               regs[value_regno].var_off =
-                       tnum_cast(regs[value_regno].var_off, size);
-               __update_reg_bounds(&regs[value_regno]);
+               coerce_reg_to_size(&regs[value_regno], size);
        }
        return err;
 }
@@ -1684,6 +1758,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
                verbose(env, "invalid variable stack read R%d var_off=%s\n",
                        regno, tn_buf);
+               return -EACCES;
        }
        off = reg->off + reg->var_off.value;
        if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -2078,9 +2153,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
        struct bpf_func_state *caller, *callee;
        int i, subprog, target_insn;
 
-       if (state->curframe >= MAX_CALL_FRAMES) {
+       if (state->curframe + 1 >= MAX_CALL_FRAMES) {
                verbose(env, "the call stack of %d frames is too deep\n",
-                       state->curframe);
+                       state->curframe + 2);
                return -E2BIG;
        }
 
@@ -2206,7 +2281,13 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                return -EINVAL;
        }
 
+       /* With LD_ABS/IND some JITs save/restore skb from r1. */
        changes_data = bpf_helper_changes_pkt_data(fn->func);
+       if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
+               verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
+                       func_id_name(func_id), func_id);
+               return -EINVAL;
+       }
 
        memset(&meta, 0, sizeof(meta));
        meta.pkt_access = fn->pkt_access;
@@ -2298,14 +2379,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
        return 0;
 }
 
-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
-       /* clear high 32 bits */
-       reg->var_off = tnum_cast(reg->var_off, 4);
-       /* Update bounds */
-       __update_reg_bounds(reg);
-}
-
 static bool signed_add_overflows(s64 a, s64 b)
 {
        /* Do the add in u64, where overflow is well-defined */
@@ -2326,6 +2399,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
        return res > a;
 }
 
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+                                 const struct bpf_reg_state *reg,
+                                 enum bpf_reg_type type)
+{
+       bool known = tnum_is_const(reg->var_off);
+       s64 val = reg->var_off.value;
+       s64 smin = reg->smin_value;
+
+       if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+               verbose(env, "math between %s pointer and %lld is not allowed\n",
+                       reg_type_str[type], val);
+               return false;
+       }
+
+       if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+               verbose(env, "%s pointer offset %d is not allowed\n",
+                       reg_type_str[type], reg->off);
+               return false;
+       }
+
+       if (smin == S64_MIN) {
+               verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+                       reg_type_str[type]);
+               return false;
+       }
+
+       if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+               verbose(env, "value %lld makes %s pointer be out of bounds\n",
+                       smin, reg_type_str[type]);
+               return false;
+       }
+
+       return true;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
@@ -2364,29 +2472,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        if (BPF_CLASS(insn->code) != BPF_ALU64) {
                /* 32-bit ALU ops on pointers produce (meaningless) scalars */
-               if (!env->allow_ptr_leaks)
-                       verbose(env,
-                               "R%d 32-bit pointer arithmetic prohibited\n",
-                               dst);
+               verbose(env,
+                       "R%d 32-bit pointer arithmetic prohibited\n",
+                       dst);
                return -EACCES;
        }
 
        if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
+                       dst);
                return -EACCES;
        }
        if (ptr_reg->type == CONST_PTR_TO_MAP) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
+                       dst);
                return -EACCES;
        }
        if (ptr_reg->type == PTR_TO_PACKET_END) {
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
-                               dst);
+               verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
+                       dst);
                return -EACCES;
        }
 
@@ -2396,6 +2500,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        dst_reg->type = ptr_reg->type;
        dst_reg->id = ptr_reg->id;
 
+       if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+           !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+               return -EINVAL;
+
        switch (opcode) {
        case BPF_ADD:
                /* We can take a fixed offset as long as it doesn't overflow
@@ -2449,9 +2557,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_SUB:
                if (dst_reg == off_reg) {
                        /* scalar -= pointer.  Creates an unknown scalar */
-                       if (!env->allow_ptr_leaks)
-                               verbose(env, "R%d tried to subtract pointer from scalar\n",
-                                       dst);
+                       verbose(env, "R%d tried to subtract pointer from scalar\n",
+                               dst);
                        return -EACCES;
                }
                /* We don't allow subtraction from FP, because (according to
@@ -2459,9 +2566,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                 * be able to deal with it.
                 */
                if (ptr_reg->type == PTR_TO_STACK) {
-                       if (!env->allow_ptr_leaks)
-                               verbose(env, "R%d subtraction from stack pointer prohibited\n",
-                                       dst);
+                       verbose(env, "R%d subtraction from stack pointer prohibited\n",
+                               dst);
                        return -EACCES;
                }
                if (known && (ptr_reg->off - smin_val ==
@@ -2510,28 +2616,30 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_AND:
        case BPF_OR:
        case BPF_XOR:
-               /* bitwise ops on pointers are troublesome, prohibit for now.
-                * (However, in principle we could allow some cases, e.g.
-                * ptr &= ~3 which would reduce min_value by 3.)
-                */
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
-                               dst, bpf_alu_string[opcode >> 4]);
+               /* bitwise ops on pointers are troublesome, prohibit. */
+               verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
+                       dst, bpf_alu_string[opcode >> 4]);
                return -EACCES;
        default:
                /* other operators (e.g. MUL,LSH) produce non-pointer results */
-               if (!env->allow_ptr_leaks)
-                       verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
-                               dst, bpf_alu_string[opcode >> 4]);
+               verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
+                       dst, bpf_alu_string[opcode >> 4]);
                return -EACCES;
        }
 
+       if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+               return -EINVAL;
+
        __update_reg_bounds(dst_reg);
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);
        return 0;
 }
 
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                                      struct bpf_insn *insn,
                                      struct bpf_reg_state *dst_reg,
@@ -2542,12 +2650,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        bool src_known, dst_known;
        s64 smin_val, smax_val;
        u64 umin_val, umax_val;
+       u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
-       if (BPF_CLASS(insn->code) != BPF_ALU64) {
-               /* 32-bit ALU ops are (32,32)->64 */
-               coerce_reg_to_32(dst_reg);
-               coerce_reg_to_32(&src_reg);
-       }
        smin_val = src_reg.smin_value;
        smax_val = src_reg.smax_value;
        umin_val = src_reg.umin_value;
@@ -2555,6 +2659,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        src_known = tnum_is_const(src_reg.var_off);
        dst_known = tnum_is_const(dst_reg->var_off);
 
+       if (!src_known &&
+           opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+               __mark_reg_unknown(dst_reg);
+               return 0;
+       }
+
        switch (opcode) {
        case BPF_ADD:
                if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
@@ -2683,9 +2793,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                __update_reg_bounds(dst_reg);
                break;
        case BPF_LSH:
-               if (umax_val > 63) {
-                       /* Shifts greater than 63 are undefined.  This includes
-                        * shifts by a negative number.
+               if (umax_val >= insn_bitness) {
+                       /* Shifts greater than 31 or 63 are undefined.
+                        * This includes shifts by a negative number.
                         */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        break;
@@ -2711,27 +2821,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                __update_reg_bounds(dst_reg);
                break;
        case BPF_RSH:
-               if (umax_val > 63) {
-                       /* Shifts greater than 63 are undefined.  This includes
-                        * shifts by a negative number.
+               if (umax_val >= insn_bitness) {
+                       /* Shifts greater than 31 or 63 are undefined.
+                        * This includes shifts by a negative number.
                         */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        break;
                }
-               /* BPF_RSH is an unsigned shift, so make the appropriate casts */
-               if (dst_reg->smin_value < 0) {
-                       if (umin_val) {
-                               /* Sign bit will be cleared */
-                               dst_reg->smin_value = 0;
-                       } else {
-                               /* Lost sign bit information */
-                               dst_reg->smin_value = S64_MIN;
-                               dst_reg->smax_value = S64_MAX;
-                       }
-               } else {
-                       dst_reg->smin_value =
-                               (u64)(dst_reg->smin_value) >> umax_val;
-               }
+               /* BPF_RSH is an unsigned shift.  If the value in dst_reg might
+                * be negative, then either:
+                * 1) src_reg might be zero, so the sign bit of the result is
+                *    unknown, so we lose our signed bounds
+                * 2) it's known negative, thus the unsigned bounds capture the
+                *    signed bounds
+                * 3) the signed bounds cross zero, so they tell us nothing
+                *    about the result
+                * If the value in dst_reg is known nonnegative, then again the
+                * unsigned bounds capture the signed bounds.
+                * Thus, in all cases it suffices to blow away our signed bounds
+                * and rely on inferring new ones from the unsigned bounds and
+                * var_off of the result.
+                */
+               dst_reg->smin_value = S64_MIN;
+               dst_reg->smax_value = S64_MAX;
                if (src_known)
                        dst_reg->var_off = tnum_rshift(dst_reg->var_off,
                                                       umin_val);
@@ -2747,6 +2859,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                break;
        }
 
+       if (BPF_CLASS(insn->code) != BPF_ALU64) {
+               /* 32-bit ALU ops are (32,32)->32 */
+               coerce_reg_to_size(dst_reg, 4);
+               coerce_reg_to_size(&src_reg, 4);
+       }
+
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);
        return 0;
@@ -2763,7 +2881,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
        struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
        struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
        u8 opcode = BPF_OP(insn->code);
-       int rc;
 
        dst_reg = &regs[insn->dst_reg];
        src_reg = NULL;
@@ -2774,43 +2891,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                if (src_reg->type != SCALAR_VALUE) {
                        if (dst_reg->type != SCALAR_VALUE) {
                                /* Combining two pointers by any ALU op yields
-                                * an arbitrary scalar.
+                                * an arbitrary scalar. Disallow all math except
+                                * pointer subtraction
                                 */
-                               if (!env->allow_ptr_leaks) {
-                                       verbose(env, "R%d pointer %s pointer prohibited\n",
-                                               insn->dst_reg,
-                                               bpf_alu_string[opcode >> 4]);
-                                       return -EACCES;
+                               if (opcode == BPF_SUB){
+                                       mark_reg_unknown(env, regs, insn->dst_reg);
+                                       return 0;
                                }
-                               mark_reg_unknown(env, regs, insn->dst_reg);
-                               return 0;
+                               verbose(env, "R%d pointer %s pointer prohibited\n",
+                                       insn->dst_reg,
+                                       bpf_alu_string[opcode >> 4]);
+                               return -EACCES;
                        } else {
                                /* scalar += pointer
                                 * This is legal, but we have to reverse our
                                 * src/dest handling in computing the range
                                 */
-                               rc = adjust_ptr_min_max_vals(env, insn,
-                                                            src_reg, dst_reg);
-                               if (rc == -EACCES && env->allow_ptr_leaks) {
-                                       /* scalar += unknown scalar */
-                                       __mark_reg_unknown(&off_reg);
-                                       return adjust_scalar_min_max_vals(
-                                                       env, insn,
-                                                       dst_reg, off_reg);
-                               }
-                               return rc;
+                               return adjust_ptr_min_max_vals(env, insn,
+                                                              src_reg, dst_reg);
                        }
                } else if (ptr_reg) {
                        /* pointer += scalar */
-                       rc = adjust_ptr_min_max_vals(env, insn,
-                                                    dst_reg, src_reg);
-                       if (rc == -EACCES && env->allow_ptr_leaks) {
-                               /* unknown scalar += scalar */
-                               __mark_reg_unknown(dst_reg);
-                               return adjust_scalar_min_max_vals(
-                                               env, insn, dst_reg, *src_reg);
-                       }
-                       return rc;
+                       return adjust_ptr_min_max_vals(env, insn,
+                                                      dst_reg, src_reg);
                }
        } else {
                /* Pretend the src is a reg with a known value, since we only
@@ -2819,17 +2922,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                off_reg.type = SCALAR_VALUE;
                __mark_reg_known(&off_reg, insn->imm);
                src_reg = &off_reg;
-               if (ptr_reg) { /* pointer += K */
-                       rc = adjust_ptr_min_max_vals(env, insn,
-                                                    ptr_reg, src_reg);
-                       if (rc == -EACCES && env->allow_ptr_leaks) {
-                               /* unknown scalar += K */
-                               __mark_reg_unknown(dst_reg);
-                               return adjust_scalar_min_max_vals(
-                                               env, insn, dst_reg, off_reg);
-                       }
-                       return rc;
-               }
+               if (ptr_reg) /* pointer += K */
+                       return adjust_ptr_min_max_vals(env, insn,
+                                                      ptr_reg, src_reg);
        }
 
        /* Got here implies adding two SCALAR_VALUEs */
@@ -2926,17 +3021,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                        return -EACCES;
                                }
                                mark_reg_unknown(env, regs, insn->dst_reg);
-                               /* high 32 bits are known zero. */
-                               regs[insn->dst_reg].var_off = tnum_cast(
-                                               regs[insn->dst_reg].var_off, 4);
-                               __update_reg_bounds(&regs[insn->dst_reg]);
+                               coerce_reg_to_size(&regs[insn->dst_reg], 4);
                        }
                } else {
                        /* case: R = imm
                         * remember the value we stored into this reg
                         */
                        regs[insn->dst_reg].type = SCALAR_VALUE;
-                       __mark_reg_known(regs + insn->dst_reg, insn->imm);
+                       if (BPF_CLASS(insn->code) == BPF_ALU64) {
+                               __mark_reg_known(regs + insn->dst_reg,
+                                                insn->imm);
+                       } else {
+                               __mark_reg_known(regs + insn->dst_reg,
+                                                (u32)insn->imm);
+                       }
                }
 
        } else if (opcode > BPF_END) {
@@ -4013,15 +4111,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
                        return range_within(rold, rcur) &&
                               tnum_in(rold->var_off, rcur->var_off);
                } else {
-                       /* if we knew anything about the old value, we're not
-                        * equal, because we can't know anything about the
-                        * scalar value of the pointer in the new value.
+                       /* We're trying to use a pointer in place of a scalar.
+                        * Even if the scalar was unbounded, this could lead to
+                        * pointer leaks because scalars are allowed to leak
+                        * while pointers are not. We could make this safe in
+                        * special cases if root is calling us, but it's
+                        * probably not worth the hassle.
                         */
-                       return rold->umin_value == 0 &&
-                              rold->umax_value == U64_MAX &&
-                              rold->smin_value == S64_MIN &&
-                              rold->smax_value == S64_MAX &&
-                              tnum_is_unknown(rold->var_off);
+                       return false;
                }
        case PTR_TO_MAP_VALUE:
                /* If the new min/max/var_off satisfy the old ones and
@@ -4106,7 +4203,7 @@ static bool stacksafe(struct bpf_func_state *old,
 
                if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
                        /* explored state didn't use this */
-                       return true;
+                       continue;
 
                if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
                        continue;
@@ -4341,15 +4438,6 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
        return 0;
 }
 
-static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
-                                 int insn_idx, int prev_insn_idx)
-{
-       if (env->dev_ops && env->dev_ops->insn_hook)
-               return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
-
-       return 0;
-}
-
 static int do_check(struct bpf_verifier_env *env)
 {
        struct bpf_verifier_state *state;
@@ -4426,14 +4514,20 @@ static int do_check(struct bpf_verifier_env *env)
                }
 
                if (env->log.level) {
+                       const struct bpf_insn_cbs cbs = {
+                               .cb_print       = verbose,
+                       };
+
                        verbose(env, "%d: ", insn_idx);
-                       print_bpf_insn(verbose, env, insn,
-                                      env->allow_ptr_leaks);
+                       print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
                }
 
-               err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
-               if (err)
-                       return err;
+               if (bpf_prog_is_dev_bound(env->prog->aux)) {
+                       err = bpf_prog_offload_verify_insn(env, insn_idx,
+                                                          prev_insn_idx);
+                       if (err)
+                               return err;
+               }
 
                regs = cur_regs(env);
                env->insn_aux_data[insn_idx].seen = true;
@@ -5016,14 +5110,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 {
        struct bpf_prog *prog = env->prog, **func, *tmp;
        int i, j, subprog_start, subprog_end = 0, len, subprog;
-       struct bpf_insn *insn = prog->insnsi;
+       struct bpf_insn *insn;
        void *old_bpf_func;
        int err = -ENOMEM;
 
        if (env->subprog_cnt == 0)
                return 0;
 
-       for (i = 0; i < prog->len; i++, insn++) {
+       for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
                if (insn->code != (BPF_JMP | BPF_CALL) ||
                    insn->src_reg != BPF_PSEUDO_CALL)
                        continue;
@@ -5062,7 +5156,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                        goto out_free;
                memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
                       len * sizeof(struct bpf_insn));
+               func[i]->type = prog->type;
                func[i]->len = len;
+               if (bpf_prog_calc_tag(func[i]))
+                       goto out_free;
                func[i]->is_func = 1;
                /* Use bpf_prog_F_tag to indicate functions in stack traces.
                 * Long term would need debug info to populate names
@@ -5112,6 +5209,25 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                bpf_prog_lock_ro(func[i]);
                bpf_prog_kallsyms_add(func[i]);
        }
+
+       /* Last step: make now unused interpreter insns from main
+        * prog consistent for later dump requests, so they can
+        * later look the same as if they were interpreted only.
+        */
+       for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+               unsigned long addr;
+
+               if (insn->code != (BPF_JMP | BPF_CALL) ||
+                   insn->src_reg != BPF_PSEUDO_CALL)
+                       continue;
+               insn->off = env->insn_aux_data[i].call_imm;
+               subprog = find_subprog(env, i + insn->off + 1);
+               addr  = (unsigned long)func[subprog + 1]->bpf_func;
+               addr &= PAGE_MASK;
+               insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+                           addr - __bpf_call_base;
+       }
+
        prog->jited = 1;
        prog->bpf_func = func[0]->bpf_func;
        prog->aux->func = func;
@@ -5341,7 +5457,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
        if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
                env->strict_alignment = true;
 
-       if (env->prog->aux->offload) {
+       if (bpf_prog_is_dev_bound(env->prog->aux)) {
                ret = bpf_prog_offload_verifier_prep(env);
                if (ret)
                        goto err_unlock;
@@ -5378,6 +5494,9 @@ skip_full_check:
                sanitize_dead_code(env);
 
        if (ret == 0)
+               ret = check_max_stack_depth(env);
+
+       if (ret == 0)
                /* program is valid, convert *(u32*)(ctx + off) accesses */
                ret = convert_ctx_accesses(env);
 
index 432eadf..2295fc6 100644 (file)
@@ -721,8 +721,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                        goto out;
        }
        /* a new mm has just been created */
-       arch_dup_mmap(oldmm, mm);
-       retval = 0;
+       retval = arch_dup_mmap(oldmm, mm);
 out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
index 13d6881..ec999f3 100644 (file)
@@ -434,17 +434,22 @@ static struct pid *good_sigevent(sigevent_t * event)
 {
        struct task_struct *rtn = current->group_leader;
 
-       if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
-               (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-                !same_thread_group(rtn, current) ||
-                (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
+       switch (event->sigev_notify) {
+       case SIGEV_SIGNAL | SIGEV_THREAD_ID:
+               rtn = find_task_by_vpid(event->sigev_notify_thread_id);
+               if (!rtn || !same_thread_group(rtn, current))
+                       return NULL;
+               /* FALLTHRU */
+       case SIGEV_SIGNAL:
+       case SIGEV_THREAD:
+               if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
+                       return NULL;
+               /* FALLTHRU */
+       case SIGEV_NONE:
+               return task_pid(rtn);
+       default:
                return NULL;
-
-       if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-           ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
-               return NULL;
-
-       return task_pid(rtn);
+       }
 }
 
 static struct k_itimer * alloc_posix_timer(void)
@@ -669,7 +674,7 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
        struct timespec64 ts64;
        bool sig_none;
 
-       sig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+       sig_none = timr->it_sigev_notify == SIGEV_NONE;
        iv = timr->it_interval;
 
        /* interval timer ? */
@@ -856,7 +861,7 @@ int common_timer_set(struct k_itimer *timr, int flags,
 
        timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
        expires = timespec64_to_ktime(new_setting->it_value);
-       sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
+       sigev_none = timr->it_sigev_notify == SIGEV_NONE;
 
        kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
        timr->it_active = !sigev_none;
index c87766c..9ab1899 100644 (file)
@@ -280,6 +280,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 /* Missed count stored at end */
 #define RB_MISSED_STORED       (1 << 30)
 
+#define RB_MISSED_FLAGS                (RB_MISSED_EVENTS|RB_MISSED_STORED)
+
 struct buffer_data_page {
        u64              time_stamp;    /* page time stamp */
        local_t          commit;        /* write committed index */
@@ -331,7 +333,9 @@ static void rb_init_page(struct buffer_data_page *bpage)
  */
 size_t ring_buffer_page_len(void *page)
 {
-       return local_read(&((struct buffer_data_page *)page)->commit)
+       struct buffer_data_page *bpage = page;
+
+       return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
                + BUF_PAGE_HDR_SIZE;
 }
 
@@ -4400,8 +4404,13 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
 {
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct buffer_data_page *bpage = data;
+       struct page *page = virt_to_page(bpage);
        unsigned long flags;
 
+       /* If the page is still in use someplace else, we can't reuse it */
+       if (page_ref_count(page) > 1)
+               goto out;
+
        local_irq_save(flags);
        arch_spin_lock(&cpu_buffer->lock);
 
@@ -4413,6 +4422,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
        arch_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
 
+ out:
        free_page((unsigned long)bpage);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
index 59518b8..2a8d8a2 100644 (file)
@@ -6769,7 +6769,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                .spd_release    = buffer_spd_release,
        };
        struct buffer_ref *ref;
-       int entries, size, i;
+       int entries, i;
        ssize_t ret = 0;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -6823,14 +6823,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                        break;
                }
 
-               /*
-                * zero out any left over data, this is going to
-                * user land.
-                */
-               size = ring_buffer_page_len(ref->page);
-               if (size < PAGE_SIZE)
-                       memset(ref->page + size, 0, PAGE_SIZE - size);
-
                page = virt_to_page(ref->page);
 
                spd.pages[i] = page;
@@ -7588,6 +7580,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
        buf->data = alloc_percpu(struct trace_array_cpu);
        if (!buf->data) {
                ring_buffer_free(buf->buffer);
+               buf->buffer = NULL;
                return -ENOMEM;
        }
 
@@ -7611,7 +7604,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
                                    allocate_snapshot ? size : 1);
        if (WARN_ON(ret)) {
                ring_buffer_free(tr->trace_buffer.buffer);
+               tr->trace_buffer.buffer = NULL;
                free_percpu(tr->trace_buffer.data);
+               tr->trace_buffer.data = NULL;
                return -ENOMEM;
        }
        tr->allocated_snapshot = allocate_snapshot;
index aa8812a..9e97480 100644 (file)
@@ -435,6 +435,41 @@ loop:
        return 0;
 }
 
+static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
+{
+       struct bpf_insn *insn;
+
+       insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       /* Due to func address being non-const, we need to
+        * assemble this here.
+        */
+       insn[0] = BPF_MOV64_REG(R6, R1);
+       insn[1] = BPF_LD_ABS(BPF_B, 0);
+       insn[2] = BPF_LD_ABS(BPF_H, 0);
+       insn[3] = BPF_LD_ABS(BPF_W, 0);
+       insn[4] = BPF_MOV64_REG(R7, R6);
+       insn[5] = BPF_MOV64_IMM(R6, 0);
+       insn[6] = BPF_MOV64_REG(R1, R7);
+       insn[7] = BPF_MOV64_IMM(R2, 1);
+       insn[8] = BPF_MOV64_IMM(R3, 2);
+       insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                              bpf_skb_vlan_push_proto.func - __bpf_call_base);
+       insn[10] = BPF_MOV64_REG(R6, R7);
+       insn[11] = BPF_LD_ABS(BPF_B, 0);
+       insn[12] = BPF_LD_ABS(BPF_H, 0);
+       insn[13] = BPF_LD_ABS(BPF_W, 0);
+       insn[14] = BPF_MOV64_IMM(R0, 42);
+       insn[15] = BPF_EXIT_INSN();
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = 16;
+
+       return 0;
+}
+
 static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
 {
        unsigned int len = BPF_MAXINSNS;
@@ -6066,6 +6101,14 @@ static struct bpf_test tests[] = {
                {},
                { {0x1, 0x42 } },
        },
+       {
+               "LD_ABS with helper changing skb data",
+               { },
+               INTERNAL,
+               { 0x34 },
+               { { ETH_HLEN, 42 } },
+               .fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
+       },
 };
 
 static struct net_device dev;
index 84b2dc7..b5f940c 100644 (file)
@@ -882,13 +882,10 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
        if (IS_ERR(dev))
                return PTR_ERR(dev);
 
-       if (bdi_debug_register(bdi, dev_name(dev))) {
-               device_destroy(bdi_class, dev->devt);
-               return -ENOMEM;
-       }
        cgwb_bdi_register(bdi);
        bdi->dev = dev;
 
+       bdi_debug_register(bdi, dev_name(dev));
        set_bit(WB_registered, &bdi->wb.state);
 
        spin_lock_bh(&bdi_lock);
index 9dba271..efe930d 100644 (file)
@@ -336,23 +336,6 @@ config NET_PKTGEN
          To compile this code as a module, choose M here: the
          module will be called pktgen.
 
-config NET_TCPPROBE
-       tristate "TCP connection probing"
-       depends on INET && PROC_FS && KPROBES
-       ---help---
-       This module allows for capturing the changes to TCP connection
-       state in response to incoming packets. It is used for debugging
-       TCP congestion avoidance modules. If you don't understand
-       what was just said, you don't need it: say N.
-
-       Documentation on how to use TCP connection probing can be found
-       at:
-       
-         http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
-
-       To compile this code as a module, choose M here: the
-       module will be called tcp_probe.
-
 config NET_DROP_MONITOR
        tristate "Network packet drop alerting service"
        depends on INET && TRACEPOINTS
index b73b96a..c44f651 100644 (file)
@@ -1,3 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
+#
+# Marek Lindner, Simon Wunderlich
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of version 2 of the GNU General Public
+# License as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
 #
 # B.A.T.M.A.N meshing protocol
 #
index 915987b..022f6e7 100644 (file)
@@ -1,4 +1,4 @@
-#
+# SPDX-License-Identifier: GPL-2.0
 # Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
index 44fd073..80c72c7 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -37,7 +38,8 @@ char batadv_routing_algo[20] = "BATMAN_IV";
 static struct hlist_head batadv_algo_list;
 
 /**
- * batadv_algo_init - Initialize batman-adv algorithm management data structures
+ * batadv_algo_init() - Initialize batman-adv algorithm management data
+ *  structures
  */
 void batadv_algo_init(void)
 {
@@ -59,6 +61,12 @@ static struct batadv_algo_ops *batadv_algo_get(char *name)
        return bat_algo_ops;
 }
 
+/**
+ * batadv_algo_register() - Register callbacks for a mesh algorithm
+ * @bat_algo_ops: mesh algorithm callbacks to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
 {
        struct batadv_algo_ops *bat_algo_ops_tmp;
@@ -88,6 +96,19 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
        return 0;
 }
 
+/**
+ * batadv_algo_select() - Select algorithm of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @name: name of the algorithm to select
+ *
+ * The algorithm callbacks for the soft interface will be set when the algorithm
+ * with the correct name was found. Any previous selected algorithm will not be
+ * deinitialized and the new selected algorithm will also not be initialized.
+ * It is therefore not allowed to call batadv_algo_select outside the creation
+ * function of the soft interface.
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
 {
        struct batadv_algo_ops *bat_algo_ops;
@@ -102,6 +123,14 @@ int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_algo_seq_print_text() - Print the supported algorithms in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct batadv_algo_ops *bat_algo_ops;
@@ -148,7 +177,7 @@ module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
                0644);
 
 /**
- * batadv_algo_dump_entry - fill in information about one supported routing
+ * batadv_algo_dump_entry() - fill in information about one supported routing
  *  algorithm
  * @msg: netlink message to be sent back
  * @portid: Port to reply to
@@ -179,7 +208,7 @@ static int batadv_algo_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_algo_dump - fill in information about supported routing
+ * batadv_algo_dump() - fill in information about supported routing
  *  algorithms
  * @msg: netlink message to be sent back
  * @cb: Parameters to the netlink request
index 29f6312..0292216 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Linus Lüssing
index bbe8414..79e3263 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -26,7 +27,7 @@
 #include <linux/cache.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
@@ -51,6 +52,7 @@
 #include <linux/workqueue.h>
 #include <net/genetlink.h>
 #include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -62,7 +64,6 @@
 #include "netlink.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "translation-table.h"
@@ -72,21 +73,28 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);
 
 /**
  * enum batadv_dup_status - duplicate status
- * @BATADV_NO_DUP: the packet is no duplicate
- * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
- *  neighbor)
- * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
- * @BATADV_PROTECTED: originator is currently protected (after reboot)
  */
 enum batadv_dup_status {
+       /** @BATADV_NO_DUP: the packet is no duplicate */
        BATADV_NO_DUP = 0,
+
+       /**
+        * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for
+        *  the neighbor)
+        */
        BATADV_ORIG_DUP,
+
+       /** @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor */
        BATADV_NEIGH_DUP,
+
+       /**
+        * @BATADV_PROTECTED: originator is currently protected (after reboot)
+        */
        BATADV_PROTECTED,
 };
 
 /**
- * batadv_ring_buffer_set - update the ring buffer with the given value
+ * batadv_ring_buffer_set() - update the ring buffer with the given value
  * @lq_recv: pointer to the ring buffer
  * @lq_index: index to store the value at
  * @value: value to store in the ring buffer
@@ -98,7 +106,7 @@ static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
 }
 
 /**
- * batadv_ring_buffer_avg - compute the average of all non-zero values stored
+ * batadv_ring_buffer_avg() - compute the average of all non-zero values stored
  * in the given ring buffer
  * @lq_recv: pointer to the ring buffer
  *
@@ -130,7 +138,7 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
 }
 
 /**
- * batadv_iv_ogm_orig_free - free the private resources allocated for this
+ * batadv_iv_ogm_orig_free() - free the private resources allocated for this
  *  orig_node
  * @orig_node: the orig_node for which the resources have to be free'd
  */
@@ -141,8 +149,8 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_iv_ogm_orig_add_if - change the private structures of the orig_node to
- *  include the new hard-interface
+ * batadv_iv_ogm_orig_add_if() - change the private structures of the orig_node
+ *  to include the new hard-interface
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  *
@@ -186,7 +194,7 @@ unlock:
 }
 
 /**
- * batadv_iv_ogm_drop_bcast_own_entry - drop section of bcast_own
+ * batadv_iv_ogm_drop_bcast_own_entry() - drop section of bcast_own
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -224,7 +232,7 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_drop_bcast_own_sum_entry - drop section of bcast_own_sum
+ * batadv_iv_ogm_drop_bcast_own_sum_entry() - drop section of bcast_own_sum
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -259,8 +267,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
- *  exclude the removed interface
+ * batadv_iv_ogm_orig_del_if() - change the private structures of the orig_node
+ *  to exclude the removed interface
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -290,7 +298,8 @@ static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_get - retrieve or create (if does not exist) an originator
+ * batadv_iv_ogm_orig_get() - retrieve or create (if does not exist) an
+ *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: mac address of the originator
  *
@@ -447,7 +456,7 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_iv_ogm_aggr_packet - checks if there is another OGM attached
+ * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
  * @tvlv_len: tvlv length of the previously considered OGM
@@ -557,7 +566,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_iv_ogm_can_aggregate - find out if an OGM can be aggregated on an
+ * batadv_iv_ogm_can_aggregate() - find out if an OGM can be aggregated on an
  *  existing forward packet
  * @new_bat_ogm_packet: OGM packet to be aggregated
  * @bat_priv: the bat priv with all the soft interface information
@@ -660,7 +669,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_aggregate_new - create a new aggregated packet and add this
+ * batadv_iv_ogm_aggregate_new() - create a new aggregated packet and add this
  *  packet to it.
  * @packet_buff: pointer to the OGM
  * @packet_len: (total) length of the OGM
@@ -743,7 +752,7 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
 }
 
 /**
- * batadv_iv_ogm_queue_add - queue up an OGM for transmission
+ * batadv_iv_ogm_queue_add() - queue up an OGM for transmission
  * @bat_priv: the bat priv with all the soft interface information
  * @packet_buff: pointer to the OGM
  * @packet_len: (total) length of the OGM
@@ -869,8 +878,8 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_slide_own_bcast_window - bitshift own OGM broadcast windows for
- * the given interface
+ * batadv_iv_ogm_slide_own_bcast_window() - bitshift own OGM broadcast windows
+ *  for the given interface
  * @hard_iface: the interface for which the windows have to be shifted
  */
 static void
@@ -987,7 +996,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_orig_update - use OGM to update corresponding data in an
+ * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an
  *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig node who originally emitted the ogm packet
@@ -1152,7 +1161,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_calc_tq - calculate tq for current received ogm packet
+ * batadv_iv_ogm_calc_tq() - calculate tq for current received ogm packet
  * @orig_node: the orig node who originally emitted the ogm packet
  * @orig_neigh_node: the orig node struct of the neighbor who sent the packet
  * @batadv_ogm_packet: the ogm packet
@@ -1298,7 +1307,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_update_seqnos -  process a batman packet for all interfaces,
+ * batadv_iv_ogm_update_seqnos() -  process a batman packet for all interfaces,
  *  adjust the sequence number and find out whether it is a duplicate
  * @ethhdr: ethernet header of the packet
  * @batadv_ogm_packet: OGM packet to be considered
@@ -1401,7 +1410,8 @@ out:
 }
 
 /**
- * batadv_iv_ogm_process_per_outif - process a batman iv OGM for an outgoing if
+ * batadv_iv_ogm_process_per_outif() - process a batman iv OGM for an outgoing
+ *  interface
  * @skb: the skb containing the OGM
  * @ogm_offset: offset from skb->data to start of ogm header
  * @orig_node: the (cached) orig node for the originator of this OGM
@@ -1608,7 +1618,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_process - process an incoming batman iv OGM
+ * batadv_iv_ogm_process() - process an incoming batman iv OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
  * @if_incoming: the interface where this packet was receved
@@ -1861,7 +1871,7 @@ free_skb:
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_iv_ogm_orig_print_neigh - print neighbors for the originator table
+ * batadv_iv_ogm_orig_print_neigh() - print neighbors for the originator table
  * @orig_node: the orig_node for which the neighbors are printed
  * @if_outgoing: outgoing interface for these entries
  * @seq: debugfs table seq_file struct
@@ -1890,7 +1900,7 @@ batadv_iv_ogm_orig_print_neigh(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_print - print the originator table
+ * batadv_iv_ogm_orig_print() - print the originator table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  * @if_outgoing: the outgoing interface for which this should be printed
@@ -1960,7 +1970,7 @@ next:
 #endif
 
 /**
- * batadv_iv_ogm_neigh_get_tq_avg - Get the TQ average for a neighbour on a
+ * batadv_iv_ogm_neigh_get_tq_avg() - Get the TQ average for a neighbour on a
  *  given outgoing interface.
  * @neigh_node: Neighbour of interest
  * @if_outgoing: Outgoing interface of interest
@@ -1986,7 +1996,7 @@ batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_subentry - Dump an originator subentry into a
+ * batadv_iv_ogm_orig_dump_subentry() - Dump an originator subentry into a
  *  message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2048,7 +2058,7 @@ batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_entry - Dump an originator entry into a message
+ * batadv_iv_ogm_orig_dump_entry() - Dump an originator entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2110,7 +2120,7 @@ batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_bucket - Dump an originator bucket into a
+ * batadv_iv_ogm_orig_dump_bucket() - Dump an originator bucket into a
  *  message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2153,7 +2163,7 @@ batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump - Dump the originators into a message
+ * batadv_iv_ogm_orig_dump() - Dump the originators into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2190,7 +2200,7 @@ batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_iv_hardif_neigh_print - print a single hop neighbour node
+ * batadv_iv_hardif_neigh_print() - print a single hop neighbour node
  * @seq: neighbour table seq_file struct
  * @hardif_neigh: hardif neighbour information
  */
@@ -2209,7 +2219,7 @@ batadv_iv_hardif_neigh_print(struct seq_file *seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_print - print the single hop neighbour list
+ * batadv_iv_ogm_neigh_print() - print the single hop neighbour list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: neighbour table seq_file struct
  */
@@ -2242,7 +2252,7 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_iv_ogm_neigh_diff - calculate tq difference of two neighbors
+ * batadv_iv_ogm_neigh_diff() - calculate tq difference of two neighbors
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
  * @neigh2: the second neighbor object of the comparison
@@ -2287,7 +2297,7 @@ out:
 }
 
 /**
- * batadv_iv_ogm_neigh_dump_neigh - Dump a neighbour into a netlink message
+ * batadv_iv_ogm_neigh_dump_neigh() - Dump a neighbour into a netlink message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2326,7 +2336,7 @@ batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_dump_hardif - Dump the neighbours of a hard interface
+ * batadv_iv_ogm_neigh_dump_hardif() - Dump the neighbours of a hard interface
  *  into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2365,7 +2375,7 @@ batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_dump - Dump the neighbours into a message
+ * batadv_iv_ogm_neigh_dump() - Dump the neighbours into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2417,7 +2427,7 @@ batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
 }
 
 /**
- * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * batadv_iv_ogm_neigh_cmp() - compare the metrics of two neighbors
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
  * @neigh2: the second neighbor object of the comparison
@@ -2443,7 +2453,7 @@ static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
 }
 
 /**
- * batadv_iv_ogm_neigh_is_sob - check if neigh1 is similarly good or better
+ * batadv_iv_ogm_neigh_is_sob() - check if neigh1 is similarly good or better
  *  than neigh2 from the metric prospective
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
@@ -2478,7 +2488,7 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_iv_init_sel_class - initialize GW selection class
+ * batadv_iv_init_sel_class() - initialize GW selection class
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
@@ -2703,7 +2713,7 @@ static void batadv_iv_gw_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_iv_gw_dump_entry - Dump a gateway into a message
+ * batadv_iv_gw_dump_entry() - Dump a gateway into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2774,7 +2784,7 @@ out:
 }
 
 /**
- * batadv_iv_gw_dump - Dump gateways into a message
+ * batadv_iv_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2843,6 +2853,11 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
        },
 };
 
+/**
+ * batadv_iv_init() - B.A.T.M.A.N. IV initialization function
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int __init batadv_iv_init(void)
 {
        int ret;
index ae2ab52..9dc0dd5 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
index e0e2bfc..27e165a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
@@ -36,6 +37,7 @@
 #include <linux/workqueue.h>
 #include <net/genetlink.h>
 #include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -48,7 +50,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 
 struct sk_buff;
 
@@ -99,7 +100,7 @@ static void batadv_v_primary_iface_set(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_iface_update_mac - react to hard-interface MAC address change
+ * batadv_v_iface_update_mac() - react to hard-interface MAC address change
  * @hard_iface: the modified interface
  *
  * If the modified interface is the primary one, update the originator
@@ -130,7 +131,7 @@ batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_v_orig_print_neigh - print neighbors for the originator table
+ * batadv_v_orig_print_neigh() - print neighbors for the originator table
  * @orig_node: the orig_node for which the neighbors are printed
  * @if_outgoing: outgoing interface for these entries
  * @seq: debugfs table seq_file struct
@@ -160,7 +161,7 @@ batadv_v_orig_print_neigh(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_v_hardif_neigh_print - print a single ELP neighbour node
+ * batadv_v_hardif_neigh_print() - print a single ELP neighbour node
  * @seq: neighbour table seq_file struct
  * @hardif_neigh: hardif neighbour information
  */
@@ -181,7 +182,7 @@ batadv_v_hardif_neigh_print(struct seq_file *seq,
 }
 
 /**
- * batadv_v_neigh_print - print the single hop neighbour list
+ * batadv_v_neigh_print() - print the single hop neighbour list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: neighbour table seq_file struct
  */
@@ -215,7 +216,7 @@ static void batadv_v_neigh_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_v_neigh_dump_neigh - Dump a neighbour into a message
+ * batadv_v_neigh_dump_neigh() - Dump a neighbour into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -258,7 +259,7 @@ batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_neigh_dump_hardif - Dump the  neighbours of a hard interface  into
+ * batadv_v_neigh_dump_hardif() - Dump the  neighbours of a hard interface into
  *  a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -296,7 +297,7 @@ batadv_v_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_neigh_dump - Dump the neighbours of a hard interface  into a
+ * batadv_v_neigh_dump() - Dump the neighbours of a hard interface  into a
  *  message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
@@ -348,7 +349,7 @@ batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_v_orig_print - print the originator table
+ * batadv_v_orig_print() - print the originator table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  * @if_outgoing: the outgoing interface for which this should be printed
@@ -416,8 +417,7 @@ next:
 #endif
 
 /**
- * batadv_v_orig_dump_subentry - Dump an originator subentry into a
- *  message
+ * batadv_v_orig_dump_subentry() - Dump an originator subentry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -483,7 +483,7 @@ batadv_v_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump_entry - Dump an originator entry into a message
+ * batadv_v_orig_dump_entry() - Dump an originator entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -536,8 +536,7 @@ batadv_v_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump_bucket - Dump an originator bucket into a
- *  message
+ * batadv_v_orig_dump_bucket() - Dump an originator bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -578,7 +577,7 @@ batadv_v_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump - Dump the originators into a message
+ * batadv_v_orig_dump() - Dump the originators into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -668,7 +667,7 @@ err_ifinfo1:
 }
 
 /**
- * batadv_v_init_sel_class - initialize GW selection class
+ * batadv_v_init_sel_class() - initialize GW selection class
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
@@ -704,7 +703,7 @@ static ssize_t batadv_v_show_sel_class(struct batadv_priv *bat_priv, char *buff)
 }
 
 /**
- * batadv_v_gw_throughput_get - retrieve the GW-bandwidth for a given GW
+ * batadv_v_gw_throughput_get() - retrieve the GW-bandwidth for a given GW
  * @gw_node: the GW to retrieve the metric for
  * @bw: the pointer where the metric will be stored. The metric is computed as
  *  the minimum between the GW advertised throughput and the path throughput to
@@ -747,7 +746,7 @@ out:
 }
 
 /**
- * batadv_v_gw_get_best_gw_node - retrieve the best GW node
+ * batadv_v_gw_get_best_gw_node() - retrieve the best GW node
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: the GW node having the best GW-metric, NULL if no GW is known
@@ -785,7 +784,7 @@ next:
 }
 
 /**
- * batadv_v_gw_is_eligible - check if a originator would be selected as GW
+ * batadv_v_gw_is_eligible() - check if a originator would be selected as GW
  * @bat_priv: the bat priv with all the soft interface information
  * @curr_gw_orig: originator representing the currently selected GW
  * @orig_node: the originator representing the new candidate
@@ -884,7 +883,7 @@ out:
 }
 
 /**
- * batadv_v_gw_print - print the gateway list
+ * batadv_v_gw_print() - print the gateway list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: gateway table seq_file struct
  */
@@ -913,7 +912,7 @@ static void batadv_v_gw_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_v_gw_dump_entry - Dump a gateway into a message
+ * batadv_v_gw_dump_entry() - Dump a gateway into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1004,7 +1003,7 @@ out:
 }
 
 /**
- * batadv_v_gw_dump - Dump gateways into a message
+ * batadv_v_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -1074,7 +1073,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
 };
 
 /**
- * batadv_v_hardif_init - initialize the algorithm specific fields in the
+ * batadv_v_hardif_init() - initialize the algorithm specific fields in the
  *  hard-interface object
  * @hard_iface: the hard-interface to initialize
  */
@@ -1088,7 +1087,7 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_mesh_init - initialize the B.A.T.M.A.N. V private resources for a
+ * batadv_v_mesh_init() - initialize the B.A.T.M.A.N. V private resources for a
  *  mesh
  * @bat_priv: the object representing the mesh interface to initialise
  *
@@ -1106,7 +1105,7 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_mesh_free - free the B.A.T.M.A.N. V private resources for a mesh
+ * batadv_v_mesh_free() - free the B.A.T.M.A.N. V private resources for a mesh
  * @bat_priv: the object representing the mesh interface to free
  */
 void batadv_v_mesh_free(struct batadv_priv *bat_priv)
@@ -1115,7 +1114,7 @@ void batadv_v_mesh_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_init - B.A.T.M.A.N. V initialization function
+ * batadv_v_init() - B.A.T.M.A.N. V initialization function
  *
  * Description: Takes care of initializing all the subcomponents.
  * It is invoked upon module load only.
index dd7c4b6..a17ab68 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Linus Lüssing
index 1de992c..a83478c 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
@@ -24,7 +25,7 @@
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <net/cfg80211.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "bat_v_ogm.h"
 #include "hard-interface.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 
 /**
- * batadv_v_elp_start_timer - restart timer for ELP periodic work
+ * batadv_v_elp_start_timer() - restart timer for ELP periodic work
  * @hard_iface: the interface for which the timer has to be reset
  */
 static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
@@ -67,7 +68,7 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_elp_get_throughput - get the throughput towards a neighbour
+ * batadv_v_elp_get_throughput() - get the throughput towards a neighbour
  * @neigh: the neighbour for which the throughput has to be obtained
  *
  * Return: The throughput towards the given neighbour in multiples of 100kpbs
@@ -153,8 +154,8 @@ default_throughput:
 }
 
 /**
- * batadv_v_elp_throughput_metric_update - worker updating the throughput metric
- *  of a single hop neighbour
+ * batadv_v_elp_throughput_metric_update() - worker updating the throughput
+ *  metric of a single hop neighbour
  * @work: the work queue item
  */
 void batadv_v_elp_throughput_metric_update(struct work_struct *work)
@@ -177,7 +178,7 @@ void batadv_v_elp_throughput_metric_update(struct work_struct *work)
 }
 
 /**
- * batadv_v_elp_wifi_neigh_probe - send link probing packets to a neighbour
+ * batadv_v_elp_wifi_neigh_probe() - send link probing packets to a neighbour
  * @neigh: the neighbour to probe
  *
  * Sends a predefined number of unicast wifi packets to a given neighbour in
@@ -240,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
 }
 
 /**
- * batadv_v_elp_periodic_work - ELP periodic task per interface
+ * batadv_v_elp_periodic_work() - ELP periodic task per interface
  * @work: work queue item
  *
  * Emits broadcast ELP message in regular intervals.
@@ -327,7 +328,7 @@ out:
 }
 
 /**
- * batadv_v_elp_iface_enable - setup the ELP interface private resources
+ * batadv_v_elp_iface_enable() - setup the ELP interface private resources
  * @hard_iface: interface for which the data has to be prepared
  *
  * Return: 0 on success or a -ENOMEM in case of failure.
@@ -375,7 +376,7 @@ out:
 }
 
 /**
- * batadv_v_elp_iface_disable - release ELP interface private resources
+ * batadv_v_elp_iface_disable() - release ELP interface private resources
  * @hard_iface: interface for which the resources have to be released
  */
 void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
@@ -387,7 +388,7 @@ void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_elp_iface_activate - update the ELP buffer belonging to the given
+ * batadv_v_elp_iface_activate() - update the ELP buffer belonging to the given
  *  hard-interface
  * @primary_iface: the new primary interface
  * @hard_iface: interface holding the to-be-updated buffer
@@ -408,7 +409,7 @@ void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
 }
 
 /**
- * batadv_v_elp_primary_iface_set - change internal data to reflect the new
+ * batadv_v_elp_primary_iface_set() - change internal data to reflect the new
  *  primary interface
  * @primary_iface: the new primary interface
  */
@@ -428,7 +429,7 @@ void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface)
 }
 
 /**
- * batadv_v_elp_neigh_update - update an ELP neighbour node
+ * batadv_v_elp_neigh_update() - update an ELP neighbour node
  * @bat_priv: the bat priv with all the soft interface information
  * @neigh_addr: the neighbour interface address
  * @if_incoming: the interface the packet was received through
@@ -488,7 +489,7 @@ orig_free:
 }
 
 /**
- * batadv_v_elp_packet_recv - main ELP packet handler
+ * batadv_v_elp_packet_recv() - main ELP packet handler
  * @skb: the received packet
  * @if_incoming: the interface this packet was received through
  *
index 376ead2..5e39d05 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
index c251445..ba59b77 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "translation-table.h"
 #include "tvlv.h"
 
 /**
- * batadv_v_ogm_orig_get - retrieve and possibly create an originator node
+ * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  *
@@ -88,7 +89,7 @@ struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_start_timer - restart the OGM sending timer
+ * batadv_v_ogm_start_timer() - restart the OGM sending timer
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
@@ -107,7 +108,7 @@ static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_ogm_send_to_if - send a batman ogm using a given interface
+ * batadv_v_ogm_send_to_if() - send a batman ogm using a given interface
  * @skb: the OGM to send
  * @hard_iface: the interface to use to send the OGM
  */
@@ -127,7 +128,7 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
 }
 
 /**
- * batadv_v_ogm_send - periodic worker broadcasting the own OGM
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
  * @work: work queue item
  */
 static void batadv_v_ogm_send(struct work_struct *work)
@@ -235,7 +236,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_iface_enable - prepare an interface for B.A.T.M.A.N. V
+ * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V
  * @hard_iface: the interface to prepare
  *
  * Takes care of scheduling own OGM sending routine for this interface.
@@ -252,7 +253,7 @@ int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_ogm_primary_iface_set - set a new primary interface
+ * batadv_v_ogm_primary_iface_set() - set a new primary interface
  * @primary_iface: the new primary interface
  */
 void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
@@ -268,8 +269,8 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
 }
 
 /**
- * batadv_v_forward_penalty - apply a penalty to the throughput metric forwarded
- *  with B.A.T.M.A.N. V OGMs
+ * batadv_v_forward_penalty() - apply a penalty to the throughput metric
+ *  forwarded with B.A.T.M.A.N. V OGMs
  * @bat_priv: the bat priv with all the soft interface information
  * @if_incoming: the interface where the OGM has been received
  * @if_outgoing: the interface where the OGM has to be forwarded to
@@ -314,7 +315,7 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_forward - check conditions and forward an OGM to the given
+ * batadv_v_ogm_forward() - check conditions and forward an OGM to the given
  *  outgoing interface
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm_received: previously received OGM to be forwarded
@@ -405,7 +406,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_metric_update - update route metric based on OGM
+ * batadv_v_ogm_metric_update() - update route metric based on OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm2: OGM2 structure
  * @orig_node: Originator structure for which the OGM has been received
@@ -490,7 +491,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_route_update - update routes based on OGM
+ * batadv_v_ogm_route_update() - update routes based on OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the Ethernet header of the OGM2
  * @ogm2: OGM2 structure
@@ -590,7 +591,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_process_per_outif - process a batman v OGM for an outgoing if
+ * batadv_v_ogm_process_per_outif() - process a batman v OGM for an outgoing if
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the Ethernet header of the OGM2
  * @ogm2: OGM2 structure
@@ -639,7 +640,7 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated
+ * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
  * @tvlv_len: tvlv length of the previously considered OGM
@@ -659,7 +660,7 @@ static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
 }
 
 /**
- * batadv_v_ogm_process - process an incoming batman v OGM
+ * batadv_v_ogm_process() - process an incoming batman v OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
  * @if_incoming: the interface where this packet was receved
@@ -787,7 +788,7 @@ out:
 }
 
 /**
- * batadv_v_ogm_packet_recv - OGM2 receiving handler
+ * batadv_v_ogm_packet_recv() - OGM2 receiving handler
  * @skb: the received OGM
  * @if_incoming: the interface where this OGM has been received
  *
@@ -851,7 +852,7 @@ free_skb:
 }
 
 /**
- * batadv_v_ogm_init - initialise the OGM2 engine
+ * batadv_v_ogm_init() - initialise the OGM2 engine
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or a negative error code in case of failure
@@ -884,7 +885,7 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_ogm_free - free OGM private resources
+ * batadv_v_ogm_free() - free OGM private resources
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_v_ogm_free(struct batadv_priv *bat_priv)
index 2068770..6a4c14c 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
index 2b070c7..bdc1ef0 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -32,7 +33,7 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
 }
 
 /**
- * batadv_bit_get_packet - receive and process one packet within the sequence
+ * batadv_bit_get_packet() - receive and process one packet within the sequence
  *  number window
  * @priv: the bat priv with all the soft interface information
  * @seq_bits: pointer to the sequence number receive packet
index cc262c9..ca9d075 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -26,7 +27,7 @@
 #include <linux/types.h>
 
 /**
- * batadv_test_bit - check if bit is set in the current window
+ * batadv_test_bit() - check if bit is set in the current window
  *
  * @seq_bits: pointer to the sequence number receive packet
  * @last_seqno: latest sequence number in seq_bits
@@ -46,7 +47,12 @@ static inline bool batadv_test_bit(const unsigned long *seq_bits,
        return test_bit(diff, seq_bits) != 0;
 }
 
-/* turn corresponding bit on, so we can remember that we got the packet */
+/**
+ * batadv_set_bit() - Turn corresponding bit on, so we can remember that we got
+ *  the packet
+ * @seq_bits: bitmap of the packet receive window
+ * @n: relative sequence number of newly received packet
+ */
 static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
 {
        /* if too old, just drop it */
index cdd8e8e..fad4785 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
@@ -24,7 +25,7 @@
 #include <linux/crc16.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
@@ -49,6 +50,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "hard-interface.h"
@@ -56,7 +58,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "sysfs.h"
 #include "translation-table.h"
@@ -69,7 +70,7 @@ batadv_bla_send_announce(struct batadv_priv *bat_priv,
                         struct batadv_bla_backbone_gw *backbone_gw);
 
 /**
- * batadv_choose_claim - choose the right bucket for a claim.
+ * batadv_choose_claim() - choose the right bucket for a claim.
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -87,7 +88,7 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
 }
 
 /**
- * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
+ * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -105,7 +106,7 @@ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
 }
 
 /**
- * batadv_compare_backbone_gw - compare address and vid of two backbone gws
+ * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
  * @node: list node of the first entry to compare
  * @data2: pointer to the second backbone gateway
  *
@@ -129,7 +130,7 @@ static bool batadv_compare_backbone_gw(const struct hlist_node *node,
 }
 
 /**
- * batadv_compare_claim - compare address and vid of two claims
+ * batadv_compare_claim() - compare address and vid of two claims
  * @node: list node of the first entry to compare
  * @data2: pointer to the second claims
  *
@@ -153,7 +154,7 @@ static bool batadv_compare_claim(const struct hlist_node *node,
 }
 
 /**
- * batadv_backbone_gw_release - release backbone gw from lists and queue for
+ * batadv_backbone_gw_release() - release backbone gw from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the backbone gw
  */
@@ -168,7 +169,7 @@ static void batadv_backbone_gw_release(struct kref *ref)
 }
 
 /**
- * batadv_backbone_gw_put - decrement the backbone gw refcounter and possibly
+ * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
  *  release it
  * @backbone_gw: backbone gateway to be free'd
  */
@@ -178,8 +179,8 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_claim_release - release claim from lists and queue for free after rcu
- *  grace period
+ * batadv_claim_release() - release claim from lists and queue for free after
+ *  rcu grace period
  * @ref: kref pointer of the claim
  */
 static void batadv_claim_release(struct kref *ref)
@@ -204,8 +205,7 @@ static void batadv_claim_release(struct kref *ref)
 }
 
 /**
- * batadv_claim_put - decrement the claim refcounter and possibly
- *  release it
+ * batadv_claim_put() - decrement the claim refcounter and possibly release it
  * @claim: claim to be free'd
  */
 static void batadv_claim_put(struct batadv_bla_claim *claim)
@@ -214,7 +214,7 @@ static void batadv_claim_put(struct batadv_bla_claim *claim)
 }
 
 /**
- * batadv_claim_hash_find - looks for a claim in the claim hash
+ * batadv_claim_hash_find() - looks for a claim in the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @data: search data (may be local/static data)
  *
@@ -253,7 +253,7 @@ batadv_claim_hash_find(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_backbone_hash_find - looks for a backbone gateway in the hash
+ * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  * @vid: the VLAN ID
@@ -297,7 +297,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
 }
 
 /**
- * batadv_bla_del_backbone_claims - delete all claims for a backbone
+ * batadv_bla_del_backbone_claims() - delete all claims for a backbone
  * @backbone_gw: backbone gateway where the claims should be removed
  */
 static void
@@ -337,7 +337,7 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_bla_send_claim - sends a claim frame according to the provided info
+ * batadv_bla_send_claim() - sends a claim frame according to the provided info
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: the mac address to be announced within the claim
  * @vid: the VLAN ID
@@ -457,7 +457,7 @@ out:
 }
 
 /**
- * batadv_bla_loopdetect_report - worker for reporting the loop
+ * batadv_bla_loopdetect_report() - worker for reporting the loop
  * @work: work queue item
  *
  * Throws an uevent, as the loopdetect check function can't do that itself
@@ -487,7 +487,7 @@ static void batadv_bla_loopdetect_report(struct work_struct *work)
 }
 
 /**
- * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
+ * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the mac address of the originator
  * @vid: the VLAN ID
@@ -560,7 +560,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
 }
 
 /**
- * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
+ * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the selected primary interface
  * @vid: VLAN identifier
@@ -586,7 +586,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_answer_request - answer a bla request by sending own claims
+ * batadv_bla_answer_request() - answer a bla request by sending own claims
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: interface where the request came on
  * @vid: the vid where the request came on
@@ -636,7 +636,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_send_request - send a request to repeat claims
+ * batadv_bla_send_request() - send a request to repeat claims
  * @backbone_gw: the backbone gateway from whom we are out of sync
  *
  * When the crc is wrong, ask the backbone gateway for a full table update.
@@ -663,7 +663,7 @@ static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_bla_send_announce - Send an announcement frame
+ * batadv_bla_send_announce() - Send an announcement frame
  * @bat_priv: the bat priv with all the soft interface information
  * @backbone_gw: our backbone gateway which should be announced
  */
@@ -684,7 +684,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_add_claim - Adds a claim in the claim hash
+ * batadv_bla_add_claim() - Adds a claim in the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: the mac address of the claim
  * @vid: the VLAN ID of the frame
@@ -774,7 +774,7 @@ claim_free_ref:
 }
 
 /**
- * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
  *  claim
  * @claim: claim whose backbone_gw should be returned
  *
@@ -794,7 +794,7 @@ batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
 }
 
 /**
- * batadv_bla_del_claim - delete a claim from the claim hash
+ * batadv_bla_del_claim() - delete a claim from the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: mac address of the claim to be removed
  * @vid: VLAN id for the claim to be removed
@@ -822,7 +822,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_announce - check for ANNOUNCE frame
+ * batadv_handle_announce() - check for ANNOUNCE frame
  * @bat_priv: the bat priv with all the soft interface information
  * @an_addr: announcement mac address (ARP Sender HW address)
  * @backbone_addr: originator address of the sender (Ethernet source MAC)
@@ -880,7 +880,7 @@ static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
 }
 
 /**
- * batadv_handle_request - check for REQUEST frame
+ * batadv_handle_request() - check for REQUEST frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
@@ -913,7 +913,7 @@ static bool batadv_handle_request(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_unclaim - check for UNCLAIM frame
+ * batadv_handle_unclaim() - check for UNCLAIM frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: originator address of the backbone (Ethernet source)
@@ -951,7 +951,7 @@ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_claim - check for CLAIM frame
+ * batadv_handle_claim() - check for CLAIM frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: originator address of the backbone (Ethernet Source)
@@ -988,7 +988,7 @@ static bool batadv_handle_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_check_claim_group - check for claim group membership
+ * batadv_check_claim_group() - check for claim group membership
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary interface of this batman interface
  * @hw_src: the Hardware source in the ARP Header
@@ -1063,7 +1063,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_process_claim - Check if this is a claim frame, and process it
+ * batadv_bla_process_claim() - Check if this is a claim frame, and process it
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @skb: the frame to be checked
@@ -1205,7 +1205,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
+ * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
  *  immediately
  * @bat_priv: the bat priv with all the soft interface information
  * @now: whether the whole hash shall be wiped now
@@ -1258,7 +1258,7 @@ purge_now:
 }
 
 /**
- * batadv_bla_purge_claims - Remove claims after a timeout or immediately
+ * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the selected primary interface, may be NULL if now is set
  * @now: whether the whole hash shall be wiped now
@@ -1316,7 +1316,7 @@ skip:
 }
 
 /**
- * batadv_bla_update_orig_address - Update the backbone gateways when the own
+ * batadv_bla_update_orig_address() - Update the backbone gateways when the own
  *  originator address changes
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the new selected primary_if
@@ -1372,7 +1372,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_send_loopdetect - send a loopdetect frame
+ * batadv_bla_send_loopdetect() - send a loopdetect frame
  * @bat_priv: the bat priv with all the soft interface information
  * @backbone_gw: the backbone gateway for which a loop should be detected
  *
@@ -1392,7 +1392,7 @@ batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_status_update - purge bla interfaces if necessary
+ * batadv_bla_status_update() - purge bla interfaces if necessary
  * @net_dev: the soft interface net device
  */
 void batadv_bla_status_update(struct net_device *net_dev)
@@ -1412,7 +1412,7 @@ void batadv_bla_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_bla_periodic_work - performs periodic bla work
+ * batadv_bla_periodic_work() - performs periodic bla work
  * @work: kernel work struct
  *
  * periodic work to do:
@@ -1517,7 +1517,7 @@ static struct lock_class_key batadv_claim_hash_lock_class_key;
 static struct lock_class_key batadv_backbone_hash_lock_class_key;
 
 /**
- * batadv_bla_init - initialize all bla structures
+ * batadv_bla_init() - initialize all bla structures
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success, < 0 on error.
@@ -1579,7 +1579,7 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast dup.
+ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: contains the bcast_packet to be checked
  *
@@ -1652,7 +1652,7 @@ out:
 }
 
 /**
- * batadv_bla_is_backbone_gw_orig - Check if the originator is a gateway for
+ * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
  *  the VLAN identified by vid.
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: originator mac address
@@ -1692,7 +1692,7 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
 }
 
 /**
- * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN.
+ * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN
  * @skb: the frame to be checked
  * @orig_node: the orig_node of the frame
  * @hdr_size: maximum length of the frame
@@ -1726,7 +1726,7 @@ bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
 }
 
 /**
- * batadv_bla_free - free all bla structures
+ * batadv_bla_free() - free all bla structures
  * @bat_priv: the bat priv with all the soft interface information
  *
  * for softinterface free or module unload
@@ -1753,7 +1753,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_loopdetect_check - check and handle a detected loop
+ * batadv_bla_loopdetect_check() - check and handle a detected loop
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the packet to check
  * @primary_if: interface where the request came on
@@ -1802,7 +1802,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_bla_rx - check packets coming from the mesh.
+ * batadv_bla_rx() - check packets coming from the mesh.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
@@ -1914,7 +1914,7 @@ out:
 }
 
 /**
- * batadv_bla_tx - check packets going into the mesh
+ * batadv_bla_tx() - check packets going into the mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
@@ -2022,7 +2022,7 @@ out:
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
+ * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq file
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -2084,7 +2084,7 @@ out:
 #endif
 
 /**
- * batadv_bla_claim_dump_entry - dump one entry of the claim table
+ * batadv_bla_claim_dump_entry() - dump one entry of the claim table
  * to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
@@ -2143,7 +2143,7 @@ out:
 }
 
 /**
- * batadv_bla_claim_dump_bucket - dump one bucket of the claim table
+ * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
  * to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
@@ -2180,7 +2180,7 @@ unlock:
 }
 
 /**
- * batadv_bla_claim_dump - dump claim table to a netlink socket
+ * batadv_bla_claim_dump() - dump claim table to a netlink socket
  * @msg: buffer for the message
  * @cb: callback structure containing arguments
  *
@@ -2247,8 +2247,8 @@ out:
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
- *  file
+ * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
+ *  seq file
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -2312,8 +2312,8 @@ out:
 #endif
 
 /**
- * batadv_bla_backbone_dump_entry - dump one entry of the backbone table
- * to a netlink socket
+ * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
+ *  netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
  * @seq: Sequence number of netlink message
@@ -2373,8 +2373,8 @@ out:
 }
 
 /**
- * batadv_bla_backbone_dump_bucket - dump one bucket of the backbone table
- * to a netlink socket
+ * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
+ *  a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
  * @seq: Sequence number of netlink message
@@ -2410,7 +2410,7 @@ unlock:
 }
 
 /**
- * batadv_bla_backbone_dump - dump backbone table to a netlink socket
+ * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
  * @msg: buffer for the message
  * @cb: callback structure containing arguments
  *
@@ -2477,7 +2477,7 @@ out:
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 /**
- * batadv_bla_check_claim - check if address is claimed
+ * batadv_bla_check_claim() - check if address is claimed
  *
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: mac address of which the claim status is checked
index 2347757..b27571a 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
@@ -30,8 +31,8 @@ struct seq_file;
 struct sk_buff;
 
 /**
- * batadv_bla_is_loopdetect_mac - check if the mac address is from a loop detect
- *  frame sent by bridge loop avoidance
+ * batadv_bla_is_loopdetect_mac() - check if the mac address is from a loop
+ *  detect frame sent by bridge loop avoidance
  * @mac: mac address to check
  *
  * Return: true if the it looks like a loop detect frame
index e32ad47..21d1189 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -25,7 +26,6 @@
 #include <linux/fs.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
-#include <linux/sched.h> /* for linux/wait.h */
 #include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <linux/stddef.h>
@@ -66,8 +66,8 @@ static int batadv_originators_open(struct inode *inode, struct file *file)
 }
 
 /**
- * batadv_originators_hardif_open - handles debugfs output for the
- *  originator table of an hard interface
+ * batadv_originators_hardif_open() - handles debugfs output for the originator
+ *  table of a hard interface
  * @inode: inode pointer to debugfs file
  * @file: pointer to the seq_file
  *
@@ -117,7 +117,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 /**
- * batadv_dat_cache_open - Prepare file handler for reads from dat_chache
+ * batadv_dat_cache_open() - Prepare file handler for reads from dat_cache
  * @inode: inode which was opened
  * @file: file handle to be initialized
  *
@@ -154,7 +154,7 @@ static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
 
 #ifdef CONFIG_BATMAN_ADV_MCAST
 /**
- * batadv_mcast_flags_open - prepare file handler for reads from mcast_flags
+ * batadv_mcast_flags_open() - prepare file handler for reads from mcast_flags
  * @inode: inode which was opened
  * @file: file handle to be initialized
  *
@@ -259,6 +259,9 @@ static struct batadv_debuginfo *batadv_hardif_debuginfos[] = {
        NULL,
 };
 
+/**
+ * batadv_debugfs_init() - Initialize soft interface independent debugfs entries
+ */
 void batadv_debugfs_init(void)
 {
        struct batadv_debuginfo **bat_debug;
@@ -289,6 +292,9 @@ err:
        batadv_debugfs = NULL;
 }
 
+/**
+ * batadv_debugfs_destroy() - Remove all debugfs entries
+ */
 void batadv_debugfs_destroy(void)
 {
        debugfs_remove_recursive(batadv_debugfs);
@@ -296,7 +302,7 @@ void batadv_debugfs_destroy(void)
 }
 
 /**
- * batadv_debugfs_add_hardif - creates the base directory for a hard interface
+ * batadv_debugfs_add_hardif() - creates the base directory for a hard interface
  *  in debugfs.
  * @hard_iface: hard interface which should be added.
  *
@@ -338,7 +344,7 @@ out:
 }
 
 /**
- * batadv_debugfs_del_hardif - delete the base directory for a hard interface
+ * batadv_debugfs_del_hardif() - delete the base directory for a hard interface
  *  in debugfs.
  * @hard_iface: hard interface which is deleted.
  */
@@ -355,6 +361,12 @@ void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
        }
 }
 
+/**
+ * batadv_debugfs_add_meshif() - Initialize interface dependent debugfs entries
+ * @dev: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debugfs_add_meshif(struct net_device *dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
@@ -401,6 +413,10 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries
+ * @dev: netdev struct of the soft interface
+ */
 void batadv_debugfs_del_meshif(struct net_device *dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
index 9c5d4a6..90a08d3 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
index 760c0de..9703c79 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -23,7 +24,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
@@ -55,7 +56,7 @@
 static void batadv_dat_purge(struct work_struct *work);
 
 /**
- * batadv_dat_start_timer - initialise the DAT periodic worker
+ * batadv_dat_start_timer() - initialise the DAT periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
@@ -66,7 +67,7 @@ static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_entry_release - release dat_entry from lists and queue for free
+ * batadv_dat_entry_release() - release dat_entry from lists and queue for free
  *  after rcu grace period
  * @ref: kref pointer of the dat_entry
  */
@@ -80,7 +81,7 @@ static void batadv_dat_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_dat_entry_put - decrement the dat_entry refcounter and possibly
+ * batadv_dat_entry_put() - decrement the dat_entry refcounter and possibly
  *  release it
  * @dat_entry: dat_entry to be free'd
  */
@@ -90,7 +91,7 @@ static void batadv_dat_entry_put(struct batadv_dat_entry *dat_entry)
 }
 
 /**
- * batadv_dat_to_purge - check whether a dat_entry has to be purged or not
+ * batadv_dat_to_purge() - check whether a dat_entry has to be purged or not
  * @dat_entry: the entry to check
  *
  * Return: true if the entry has to be purged now, false otherwise.
@@ -102,7 +103,7 @@ static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
 }
 
 /**
- * __batadv_dat_purge - delete entries from the DAT local storage
+ * __batadv_dat_purge() - delete entries from the DAT local storage
  * @bat_priv: the bat priv with all the soft interface information
  * @to_purge: function in charge to decide whether an entry has to be purged or
  *           not. This function takes the dat_entry as argument and has to
@@ -145,8 +146,8 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_purge - periodic task that deletes old entries from the local DAT
- * hash table
+ * batadv_dat_purge() - periodic task that deletes old entries from the local
+ *  DAT hash table
  * @work: kernel work struct
  */
 static void batadv_dat_purge(struct work_struct *work)
@@ -164,7 +165,7 @@ static void batadv_dat_purge(struct work_struct *work)
 }
 
 /**
- * batadv_compare_dat - comparing function used in the local DAT hash table
+ * batadv_compare_dat() - comparing function used in the local DAT hash table
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
@@ -179,7 +180,7 @@ static bool batadv_compare_dat(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_arp_hw_src - extract the hw_src field from an ARP packet
+ * batadv_arp_hw_src() - extract the hw_src field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -196,7 +197,7 @@ static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_ip_src - extract the ip_src field from an ARP packet
+ * batadv_arp_ip_src() - extract the ip_src field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -208,7 +209,7 @@ static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_hw_dst - extract the hw_dst field from an ARP packet
+ * batadv_arp_hw_dst() - extract the hw_dst field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -220,7 +221,7 @@ static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_ip_dst - extract the ip_dst field from an ARP packet
+ * batadv_arp_ip_dst() - extract the ip_dst field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -232,7 +233,7 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_hash_dat - compute the hash value for an IP address
+ * batadv_hash_dat() - compute the hash value for an IP address
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -267,7 +268,7 @@ static u32 batadv_hash_dat(const void *data, u32 size)
 }
 
 /**
- * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash
+ * batadv_dat_entry_hash_find() - look for a given dat_entry in the local hash
  * table
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: search key
@@ -310,7 +311,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
 }
 
 /**
- * batadv_dat_entry_add - add a new dat entry or update it if already exists
+ * batadv_dat_entry_add() - add a new dat entry or update it if already exists
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: ipv4 to add/edit
  * @mac_addr: mac address to assign to the given ipv4
@@ -367,7 +368,8 @@ out:
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 
 /**
- * batadv_dbg_arp - print a debug message containing all the ARP packet details
+ * batadv_dbg_arp() - print a debug message containing all the ARP packet
+ *  details
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
@@ -448,7 +450,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
 #endif /* CONFIG_BATMAN_ADV_DEBUG */
 
 /**
- * batadv_is_orig_node_eligible - check whether a node can be a DHT candidate
+ * batadv_is_orig_node_eligible() - check whether a node can be a DHT candidate
  * @res: the array with the already selected candidates
  * @select: number of already selected candidates
  * @tmp_max: address of the currently evaluated node
@@ -502,7 +504,7 @@ out:
 }
 
 /**
- * batadv_choose_next_candidate - select the next DHT candidate
+ * batadv_choose_next_candidate() - select the next DHT candidate
  * @bat_priv: the bat priv with all the soft interface information
  * @cands: candidates array
  * @select: number of candidates already present in the array
@@ -566,8 +568,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_select_candidates - select the nodes which the DHT message has to
- * be sent to
+ * batadv_dat_select_candidates() - select the nodes which the DHT message has
+ *  to be sent to
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_dst: ipv4 to look up in the DHT
  * @vid: VLAN identifier
@@ -612,7 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
 }
 
 /**
- * batadv_dat_send_data - send a payload to the selected candidates
+ * batadv_dat_send_data() - send a payload to the selected candidates
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @ip: the DHT key
@@ -688,7 +690,7 @@ out:
 }
 
 /**
- * batadv_dat_tvlv_container_update - update the dat tvlv container after dat
+ * batadv_dat_tvlv_container_update() - update the dat tvlv container after dat
  *  setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -710,7 +712,7 @@ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_status_update - update the dat tvlv container after dat
+ * batadv_dat_status_update() - update the dat tvlv container after dat
  *  setting change
  * @net_dev: the soft interface net device
  */
@@ -722,7 +724,7 @@ void batadv_dat_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
+ * batadv_dat_tvlv_ogm_handler_v1() - process incoming dat tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -741,7 +743,7 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_hash_free - free the local DAT hash table
+ * batadv_dat_hash_free() - free the local DAT hash table
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
@@ -757,7 +759,7 @@ static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_init - initialise the DAT internals
+ * batadv_dat_init() - initialise the DAT internals
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 in case of success, a negative error code otherwise
@@ -782,7 +784,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_free - free the DAT internals
+ * batadv_dat_free() - free the DAT internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_dat_free(struct batadv_priv *bat_priv)
@@ -797,7 +799,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_dat_cache_seq_print_text - print the local DAT hash table
+ * batadv_dat_cache_seq_print_text() - print the local DAT hash table
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -850,7 +852,7 @@ out:
 #endif
 
 /**
- * batadv_arp_get_type - parse an ARP packet and gets the type
+ * batadv_arp_get_type() - parse an ARP packet and get the type
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to analyse
  * @hdr_size: size of the possible header before the ARP packet in the skb
@@ -924,7 +926,7 @@ out:
 }
 
 /**
- * batadv_dat_get_vid - extract the VLAN identifier from skb if any
+ * batadv_dat_get_vid() - extract the VLAN identifier from skb if any
  * @skb: the buffer containing the packet to extract the VID from
  * @hdr_size: the size of the batman-adv header encapsulating the packet
  *
@@ -950,7 +952,7 @@ static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
 }
 
 /**
- * batadv_dat_arp_create_reply - create an ARP Reply
+ * batadv_dat_arp_create_reply() - create an ARP Reply
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_src: ARP sender IP
  * @ip_dst: ARP target IP
@@ -985,7 +987,7 @@ batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src,
 }
 
 /**
- * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
+ * batadv_dat_snoop_outgoing_arp_request() - snoop the ARP request and try to
  * answer using DAT
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
@@ -1083,7 +1085,7 @@ out:
 }
 
 /**
- * batadv_dat_snoop_incoming_arp_request - snoop the ARP request and try to
+ * batadv_dat_snoop_incoming_arp_request() - snoop the ARP request and try to
  * answer using the local DAT storage
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
@@ -1153,7 +1155,7 @@ out:
 }
 
 /**
- * batadv_dat_snoop_outgoing_arp_reply - snoop the ARP reply and fill the DHT
+ * batadv_dat_snoop_outgoing_arp_reply() - snoop the ARP reply and fill the DHT
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  */
@@ -1193,8 +1195,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local
- * DAT storage only
+ * batadv_dat_snoop_incoming_arp_reply() - snoop the ARP reply and fill the
+ *  local DAT storage only
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
@@ -1282,8 +1284,8 @@ out:
 }
 
 /**
- * batadv_dat_drop_broadcast_packet - check if an ARP request has to be dropped
- * (because the node has already obtained the reply via DAT) or not
+ * batadv_dat_drop_broadcast_packet() - check if an ARP request has to be
+ *  dropped (because the node has already obtained the reply via DAT) or not
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the broadcast packet
  *
index ec364a3..12897eb 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -23,9 +24,9 @@
 #include <linux/compiler.h>
 #include <linux/netdevice.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "originator.h"
-#include "packet.h"
 
 struct seq_file;
 struct sk_buff;
@@ -48,7 +49,7 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
                                      struct batadv_forw_packet *forw_packet);
 
 /**
- * batadv_dat_init_orig_node_addr - assign a DAT address to the orig_node
+ * batadv_dat_init_orig_node_addr() - assign a DAT address to the orig_node
  * @orig_node: the node to assign the DAT address to
  */
 static inline void
@@ -61,7 +62,7 @@ batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_dat_init_own_addr - assign a DAT address to the node itself
+ * batadv_dat_init_own_addr() - assign a DAT address to the node itself
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: a pointer to the primary interface
  */
@@ -82,7 +83,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv);
 int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset);
 
 /**
- * batadv_dat_inc_counter - increment the correct DAT packet counter
+ * batadv_dat_inc_counter() - increment the correct DAT packet counter
  * @bat_priv: the bat priv with all the soft interface information
  * @subtype: the 4addr subtype of the packet to be counted
  *
index ebe6e38..22dde42 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "soft-interface.h"
 
 /**
- * batadv_frag_clear_chain - delete entries in the fragment buffer chain
+ * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
  * @head: head of chain with entries.
  * @dropped: whether the chain is cleared because all fragments are dropped
  *
@@ -65,7 +66,7 @@ static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
 }
 
 /**
- * batadv_frag_purge_orig - free fragments associated to an orig
+ * batadv_frag_purge_orig() - free fragments associated to an orig
  * @orig_node: originator to free fragments from
  * @check_cb: optional function to tell if an entry should be purged
  */
@@ -89,7 +90,7 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_frag_size_limit - maximum possible size of packet to be fragmented
+ * batadv_frag_size_limit() - maximum possible size of packet to be fragmented
  *
  * Return: the maximum size of payload that can be fragmented.
  */
@@ -104,7 +105,7 @@ static int batadv_frag_size_limit(void)
 }
 
 /**
- * batadv_frag_init_chain - check and prepare fragment chain for new fragment
+ * batadv_frag_init_chain() - check and prepare fragment chain for new fragment
  * @chain: chain in fragments table to init
  * @seqno: sequence number of the received fragment
  *
@@ -134,7 +135,7 @@ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
 }
 
 /**
- * batadv_frag_insert_packet - insert a fragment into a fragment chain
+ * batadv_frag_insert_packet() - insert a fragment into a fragment chain
  * @orig_node: originator that the fragment was received from
  * @skb: skb to insert
  * @chain_out: list head to attach complete chains of fragments to
@@ -248,7 +249,7 @@ err:
 }
 
 /**
- * batadv_frag_merge_packets - merge a chain of fragments
+ * batadv_frag_merge_packets() - merge a chain of fragments
  * @chain: head of chain with fragments
  *
  * Expand the first skb in the chain and copy the content of the remaining
@@ -306,7 +307,7 @@ free:
 }
 
 /**
- * batadv_frag_skb_buffer - buffer fragment for later merge
+ * batadv_frag_skb_buffer() - buffer fragment for later merge
  * @skb: skb to buffer
  * @orig_node_src: originator that the skb is received from
  *
@@ -346,7 +347,7 @@ out_err:
 }
 
 /**
- * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
+ * batadv_frag_skb_fwd() - forward fragments that would exceed MTU when merged
  * @skb: skb to forward
  * @recv_if: interface that the skb is received on
  * @orig_node_src: originator that the skb is received from
@@ -400,7 +401,7 @@ out:
 }
 
 /**
- * batadv_frag_create - create a fragment from skb
+ * batadv_frag_create() - create a fragment from skb
  * @skb: skb to create fragment from
  * @frag_head: header to use in new fragment
  * @fragment_size: size of new fragment
@@ -438,7 +439,7 @@ err:
 }
 
 /**
- * batadv_frag_send_packet - create up to 16 fragments from the passed skb
+ * batadv_frag_send_packet() - create up to 16 fragments from the passed skb
  * @skb: skb to create fragments from
  * @orig_node: final destination of the created fragments
  * @neigh_node: next-hop of the created fragments
index 1a2d6c3..138b22a 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
@@ -39,7 +40,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                            struct batadv_neigh_node *neigh_node);
 
 /**
- * batadv_frag_check_entry - check if a list of fragments has timed out
+ * batadv_frag_check_entry() - check if a list of fragments has timed out
  * @frags_entry: table entry to check
  *
  * Return: true if the frags entry has timed out, false otherwise.
index 10d521f..37fe9a6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/in.h>
@@ -42,6 +43,7 @@
 #include <linux/stddef.h>
 #include <linux/udp.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "gateway_common.h"
@@ -49,7 +51,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "soft-interface.h"
 #include "sysfs.h"
@@ -68,8 +69,8 @@
 #define BATADV_DHCP_CHADDR_OFFSET      28
 
 /**
- * batadv_gw_node_release - release gw_node from lists and queue for free after
- *  rcu grace period
+ * batadv_gw_node_release() - release gw_node from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the gw_node
  */
 static void batadv_gw_node_release(struct kref *ref)
@@ -83,7 +84,8 @@ static void batadv_gw_node_release(struct kref *ref)
 }
 
 /**
- * batadv_gw_node_put - decrement the gw_node refcounter and possibly release it
+ * batadv_gw_node_put() - decrement the gw_node refcounter and possibly release
+ *  it
  * @gw_node: gateway node to free
  */
 void batadv_gw_node_put(struct batadv_gw_node *gw_node)
@@ -91,6 +93,12 @@ void batadv_gw_node_put(struct batadv_gw_node *gw_node)
        kref_put(&gw_node->refcount, batadv_gw_node_release);
 }
 
+/**
+ * batadv_gw_get_selected_gw_node() - Get currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: selected gateway (with increased refcnt), NULL on errors
+ */
 struct batadv_gw_node *
 batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
 {
@@ -109,6 +117,12 @@ out:
        return gw_node;
 }
 
+/**
+ * batadv_gw_get_selected_orig() - Get originator of currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: orig_node of selected gateway (with increased refcnt), NULL on errors
+ */
 struct batadv_orig_node *
 batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
 {
@@ -155,7 +169,7 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_reselect - force a gateway reselection
+ * batadv_gw_reselect() - force a gateway reselection
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Set a flag to remind the GW component to perform a new gateway reselection.
@@ -171,7 +185,7 @@ void batadv_gw_reselect(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_gw_check_client_stop - check if client mode has been switched off
+ * batadv_gw_check_client_stop() - check if client mode has been switched off
  * @bat_priv: the bat priv with all the soft interface information
  *
  * This function assumes the caller has checked that the gw state *is actually
@@ -202,6 +216,10 @@ void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
        batadv_gw_node_put(curr_gw);
 }
 
+/**
+ * batadv_gw_election() - Elect the best gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_gw_election(struct batadv_priv *bat_priv)
 {
        struct batadv_gw_node *curr_gw = NULL;
@@ -290,6 +308,11 @@ out:
                batadv_neigh_ifinfo_put(router_ifinfo);
 }
 
+/**
+ * batadv_gw_check_election() - Elect orig node as best gateway when eligible
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be checked
+ */
 void batadv_gw_check_election(struct batadv_priv *bat_priv,
                              struct batadv_orig_node *orig_node)
 {
@@ -321,7 +344,7 @@ out:
 }
 
 /**
- * batadv_gw_node_add - add gateway node to list of available gateways
+ * batadv_gw_node_add() - add gateway node to list of available gateways
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  * @gateway: announced bandwidth information
@@ -364,7 +387,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_node_get - retrieve gateway node from list of available gateways
+ * batadv_gw_node_get() - retrieve gateway node from list of available gateways
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  *
@@ -393,7 +416,7 @@ struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_node_update - update list of available gateways with changed
+ * batadv_gw_node_update() - update list of available gateways with changed
  *  bandwidth information
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
@@ -458,6 +481,11 @@ out:
                batadv_gw_node_put(gw_node);
 }
 
+/**
+ * batadv_gw_node_delete() - Remove orig_node from gateway list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is currently in process of being removed
+ */
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
                           struct batadv_orig_node *orig_node)
 {
@@ -469,6 +497,10 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
        batadv_gw_node_update(bat_priv, orig_node, &gateway);
 }
 
+/**
+ * batadv_gw_node_free() - Free gateway information from soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_gw_node_free(struct batadv_priv *bat_priv)
 {
        struct batadv_gw_node *gw_node;
@@ -484,6 +516,14 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv)
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_gw_client_seq_print_text() - Print the gateway table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
@@ -514,7 +554,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_gw_dump - Dump gateways into a message
+ * batadv_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  *
@@ -567,7 +607,7 @@ out:
 }
 
 /**
- * batadv_gw_dhcp_recipient_get - check if a packet is a DHCP message
+ * batadv_gw_dhcp_recipient_get() - check if a packet is a DHCP message
  * @skb: the packet to check
  * @header_len: a pointer to the batman-adv header size
  * @chaddr: buffer where the client address will be stored. Valid
@@ -686,7 +726,8 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
 }
 
 /**
- * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
+ * batadv_gw_out_of_range() - check if the dhcp request destination is the best
+ *  gateway
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the outgoing packet
  *
index 3baa3d4..981f584 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
index 2c26039..b3e156a 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
 #include <linux/netdevice.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "gateway_client.h"
 #include "log.h"
-#include "packet.h"
 #include "tvlv.h"
 
 /**
- * batadv_parse_throughput - parse supplied string buffer to extract throughput
- *  information
+ * batadv_parse_throughput() - parse supplied string buffer to extract
+ *  throughput information
  * @net_dev: the soft interface net device
  * @buff: string buffer to parse
  * @description: text shown when throughput string cannot be parsed
@@ -100,8 +101,8 @@ bool batadv_parse_throughput(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
- *  and upload bandwidth information
+ * batadv_parse_gw_bandwidth() - parse supplied string buffer to extract
+ *  download and upload bandwidth information
  * @net_dev: the soft interface net device
  * @buff: string buffer to parse
  * @down: pointer holding the returned download bandwidth information
@@ -136,8 +137,8 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_gw_tvlv_container_update - update the gw tvlv container after gateway
- *  setting change
+ * batadv_gw_tvlv_container_update() - update the gw tvlv container after
+ *  gateway setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
@@ -164,6 +165,15 @@ void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
        }
 }
 
+/**
+ * batadv_gw_bandwidth_set() - Parse and set download/upload gateway bandwidth
+ *  from supplied string buffer
+ * @net_dev: netdev struct of the soft interface
+ * @buff: the buffer containing the user data
+ * @count: number of bytes in the buffer
+ *
+ * Return: 'count' on success or a negative error code in case of failure
+ */
 ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
                                size_t count)
 {
@@ -207,7 +217,7 @@ ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_gw_tvlv_ogm_handler_v1 - process incoming gateway tvlv container
+ * batadv_gw_tvlv_ogm_handler_v1() - process incoming gateway tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -248,7 +258,7 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_init - initialise the gateway handling internals
+ * batadv_gw_init() - initialise the gateway handling internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_init(struct batadv_priv *bat_priv)
@@ -264,7 +274,7 @@ void batadv_gw_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_gw_free - free the gateway handling internals
+ * batadv_gw_free() - free the gateway handling internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_free(struct batadv_priv *bat_priv)
index 0a6a97d..afebd9c 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -32,11 +33,12 @@ enum batadv_gw_modes {
 
 /**
  * enum batadv_bandwidth_units - bandwidth unit types
- * @BATADV_BW_UNIT_KBIT: unit type kbit
- * @BATADV_BW_UNIT_MBIT: unit type mbit
  */
 enum batadv_bandwidth_units {
+       /** @BATADV_BW_UNIT_KBIT: unit type kbit */
        BATADV_BW_UNIT_KBIT,
+
+       /** @BATADV_BW_UNIT_MBIT: unit type mbit */
        BATADV_BW_UNIT_MBIT,
 };
 
index 4e3d534..5f186bf 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -22,7 +23,7 @@
 #include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
@@ -37,6 +38,7 @@
 #include <linux/spinlock.h>
 #include <net/net_namespace.h>
 #include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_v.h"
 #include "bridge_loop_avoidance.h"
 #include "gateway_client.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "soft-interface.h"
 #include "sysfs.h"
 #include "translation-table.h"
 
 /**
- * batadv_hardif_release - release hard interface from lists and queue for
+ * batadv_hardif_release() - release hard interface from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the hard interface
  */
@@ -66,6 +67,12 @@ void batadv_hardif_release(struct kref *ref)
        kfree_rcu(hard_iface, rcu);
 }
 
+/**
+ * batadv_hardif_get_by_netdev() - Get hard interface object of a net_device
+ * @net_dev: net_device to search for
+ *
+ * Return: batadv_hard_iface of net_dev (with increased refcnt), NULL on errors
+ */
 struct batadv_hard_iface *
 batadv_hardif_get_by_netdev(const struct net_device *net_dev)
 {
@@ -86,7 +93,7 @@ out:
 }
 
 /**
- * batadv_getlink_net - return link net namespace (of use fallback)
+ * batadv_getlink_net() - return link net namespace (or use fallback)
  * @netdev: net_device to check
  * @fallback_net: return in case get_link_net is not available for @netdev
  *
@@ -105,7 +112,7 @@ static struct net *batadv_getlink_net(const struct net_device *netdev,
 }
 
 /**
- * batadv_mutual_parents - check if two devices are each others parent
+ * batadv_mutual_parents() - check if two devices are each others parent
  * @dev1: 1st net dev
  * @net1: 1st devices netns
  * @dev2: 2nd net dev
@@ -138,7 +145,7 @@ static bool batadv_mutual_parents(const struct net_device *dev1,
 }
 
 /**
- * batadv_is_on_batman_iface - check if a device is a batman iface descendant
+ * batadv_is_on_batman_iface() - check if a device is a batman iface descendant
  * @net_dev: the device to check
  *
  * If the user creates any virtual device on top of a batman-adv interface, it
@@ -202,7 +209,7 @@ static bool batadv_is_valid_iface(const struct net_device *net_dev)
 }
 
 /**
- * batadv_get_real_netdevice - check if the given netdev struct is a virtual
+ * batadv_get_real_netdevice() - check if the given netdev struct is a virtual
  *  interface on top of another 'real' interface
  * @netdev: the device to check
  *
@@ -246,7 +253,7 @@ out:
 }
 
 /**
- * batadv_get_real_netdev - check if the given net_device struct is a virtual
+ * batadv_get_real_netdev() - check if the given net_device struct is a virtual
  *  interface on top of another 'real' interface
  * @net_device: the device to check
  *
@@ -265,7 +272,7 @@ struct net_device *batadv_get_real_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_is_wext_netdev - check if the given net_device struct is a
+ * batadv_is_wext_netdev() - check if the given net_device struct is a
  *  wext wifi interface
  * @net_device: the device to check
  *
@@ -289,7 +296,7 @@ static bool batadv_is_wext_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_is_cfg80211_netdev - check if the given net_device struct is a
+ * batadv_is_cfg80211_netdev() - check if the given net_device struct is a
  *  cfg80211 wifi interface
  * @net_device: the device to check
  *
@@ -309,7 +316,7 @@ static bool batadv_is_cfg80211_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_wifi_flags_evaluate - calculate wifi flags for net_device
+ * batadv_wifi_flags_evaluate() - calculate wifi flags for net_device
  * @net_device: the device to check
  *
  * Return: batadv_hard_iface_wifi_flags flags of the device
@@ -344,7 +351,7 @@ out:
 }
 
 /**
- * batadv_is_cfg80211_hardif - check if the given hardif is a cfg80211 wifi
+ * batadv_is_cfg80211_hardif() - check if the given hardif is a cfg80211 wifi
  *  interface
  * @hard_iface: the device to check
  *
@@ -362,7 +369,7 @@ bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_is_wifi_hardif - check if the given hardif is a wifi interface
+ * batadv_is_wifi_hardif() - check if the given hardif is a wifi interface
  * @hard_iface: the device to check
  *
  * Return: true if the net device is a 802.11 wireless device, false otherwise.
@@ -376,7 +383,7 @@ bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_hardif_no_broadcast - check whether (re)broadcast is necessary
+ * batadv_hardif_no_broadcast() - check whether (re)broadcast is necessary
  * @if_outgoing: the outgoing interface checked and considered for (re)broadcast
  * @orig_addr: the originator of this packet
  * @orig_neigh: originator address of the forwarder we just got the packet from
@@ -560,6 +567,13 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
        soft_iface->needed_tailroom = lower_tailroom;
 }
 
+/**
+ * batadv_hardif_min_mtu() - Calculate maximum MTU for soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: MTU for the soft-interface (limited by the minimal MTU of all active
+ *  slave interfaces)
+ */
 int batadv_hardif_min_mtu(struct net_device *soft_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -606,7 +620,11 @@ out:
        return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
 }
 
-/* adjusts the MTU if a new interface with a smaller MTU appeared. */
+/**
+ * batadv_update_min_mtu() - Adjusts the MTU if a new interface with a smaller
+ *  MTU appeared
+ * @soft_iface: netdev struct of the soft interface
+ */
 void batadv_update_min_mtu(struct net_device *soft_iface)
 {
        soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
@@ -667,7 +685,7 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_master_del_slave - remove hard_iface from the current master interface
+ * batadv_master_del_slave() - remove hard_iface from the current master iface
  * @slave: the interface enslaved in another master
  * @master: the master from which slave has to be removed
  *
@@ -691,6 +709,14 @@ static int batadv_master_del_slave(struct batadv_hard_iface *slave,
        return ret;
 }
 
+/**
+ * batadv_hardif_enable_interface() - Enslave hard interface to soft interface
+ * @hard_iface: hard interface to add to soft interface
+ * @net: the applicable net namespace
+ * @iface_name: name of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
                                   struct net *net, const char *iface_name)
 {
@@ -802,6 +828,12 @@ err:
        return ret;
 }
 
+/**
+ * batadv_hardif_disable_interface() - Remove hard interface from soft interface
+ * @hard_iface: hard interface to be removed
+ * @autodel: whether to delete soft interface when it doesn't contain any other
+ *  slave interfaces
+ */
 void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
                                     enum batadv_hard_if_cleanup autodel)
 {
@@ -936,6 +968,9 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
        batadv_hardif_put(hard_iface);
 }
 
+/**
+ * batadv_hardif_remove_interfaces() - Remove all hard interfaces
+ */
 void batadv_hardif_remove_interfaces(void)
 {
        struct batadv_hard_iface *hard_iface, *hard_iface_tmp;
index 9f9890f..de5e9a3 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
 struct net_device;
 struct net;
 
+/**
+ * enum batadv_hard_if_state - State of a hard interface
+ */
 enum batadv_hard_if_state {
+       /**
+        * @BATADV_IF_NOT_IN_USE: interface is not used as slave interface of a
+        * batman-adv soft interface
+        */
        BATADV_IF_NOT_IN_USE,
+
+       /**
+        * @BATADV_IF_TO_BE_REMOVED: interface will be removed from soft
+        * interface
+        */
        BATADV_IF_TO_BE_REMOVED,
+
+       /** @BATADV_IF_INACTIVE: interface is deactivated */
        BATADV_IF_INACTIVE,
+
+       /** @BATADV_IF_ACTIVE: interface is used */
        BATADV_IF_ACTIVE,
+
+       /** @BATADV_IF_TO_BE_ACTIVATED: interface is getting activated */
        BATADV_IF_TO_BE_ACTIVATED,
+
+       /**
+        * @BATADV_IF_I_WANT_YOU: interface is queued up (using sysfs) for being
+        * added as slave interface of a batman-adv soft interface
+        */
        BATADV_IF_I_WANT_YOU,
 };
 
 /**
  * enum batadv_hard_if_bcast - broadcast avoidance options
- * @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface
- * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no recipient
- * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it from
- * @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator
  */
 enum batadv_hard_if_bcast {
+       /** @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface */
        BATADV_HARDIF_BCAST_OK = 0,
+
+       /**
+        * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no
+        *  recipient
+        */
        BATADV_HARDIF_BCAST_NORECIPIENT,
+
+       /**
+        * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it
+        *  from
+        */
        BATADV_HARDIF_BCAST_DUPFWD,
+
+       /** @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator */
        BATADV_HARDIF_BCAST_DUPORIG,
 };
 
 /**
  * enum batadv_hard_if_cleanup - Cleanup modi for soft_iface after slave removal
- * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
- * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after last slave was removed
  */
 enum batadv_hard_if_cleanup {
+       /**
+        * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
+        */
        BATADV_IF_CLEANUP_KEEP,
+
+       /**
+        * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after last slave was
+        *  removed
+        */
        BATADV_IF_CLEANUP_AUTO,
 };
 
@@ -82,7 +121,7 @@ int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
                               u8 *orig_addr, u8 *orig_neigh);
 
 /**
- * batadv_hardif_put - decrement the hard interface refcounter and possibly
+ * batadv_hardif_put() - decrement the hard interface refcounter and possibly
  *  release it
  * @hard_iface: the hard interface to free
  */
@@ -91,6 +130,12 @@ static inline void batadv_hardif_put(struct batadv_hard_iface *hard_iface)
        kref_put(&hard_iface->refcount, batadv_hardif_release);
 }
 
+/**
+ * batadv_primary_if_get_selected() - Get reference to primary interface
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: primary interface (with increased refcnt), otherwise NULL
+ */
 static inline struct batadv_hard_iface *
 batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
 {
index b5f7e13..04d9643 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -18,7 +19,7 @@
 #include "hash.h"
 #include "main.h"
 
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/lockdep.h>
 #include <linux/slab.h>
 
@@ -33,7 +34,10 @@ static void batadv_hash_init(struct batadv_hashtable *hash)
        }
 }
 
-/* free only the hashtable and the hash itself. */
+/**
+ * batadv_hash_destroy() - Free only the hashtable and the hash itself
+ * @hash: hash object to destroy
+ */
 void batadv_hash_destroy(struct batadv_hashtable *hash)
 {
        kfree(hash->list_locks);
@@ -41,7 +45,12 @@ void batadv_hash_destroy(struct batadv_hashtable *hash)
        kfree(hash);
 }
 
-/* allocates and clears the hash */
+/**
+ * batadv_hash_new() - Allocates and clears the hashtable
+ * @size: number of hash buckets to allocate
+ *
+ * Return: newly allocated hashtable, NULL on errors
+ */
 struct batadv_hashtable *batadv_hash_new(u32 size)
 {
        struct batadv_hashtable *hash;
@@ -70,6 +79,11 @@ free_hash:
        return NULL;
 }
 
+/**
+ * batadv_hash_set_lock_class() - Set specific lockdep class for hash spinlocks
+ * @hash: hash object to modify
+ * @key: lockdep class key address
+ */
 void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
                                struct lock_class_key *key)
 {
index 0c905e9..4ce1b6d 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -45,10 +46,18 @@ typedef bool (*batadv_hashdata_compare_cb)(const struct hlist_node *,
 typedef u32 (*batadv_hashdata_choose_cb)(const void *, u32);
 typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
 
+/**
+ * struct batadv_hashtable - Wrapper of simple hlist based hashtable
+ */
 struct batadv_hashtable {
-       struct hlist_head *table;   /* the hashtable itself with the buckets */
-       spinlock_t *list_locks;     /* spinlock for each hash list entry */
-       u32 size;                   /* size of hashtable */
+       /** @table: the hashtable itself with the buckets */
+       struct hlist_head *table;
+
+       /** @list_locks: spinlock for each hash list entry */
+       spinlock_t *list_locks;
+
+       /** @size: size of hashtable */
+       u32 size;
 };
 
 /* allocates and clears the hash */
@@ -62,7 +71,7 @@ void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
 void batadv_hash_destroy(struct batadv_hashtable *hash);
 
 /**
- *     batadv_hash_add - adds data to the hashtable
+ *     batadv_hash_add() - adds data to the hashtable
  *     @hash: storage hash table
  *     @compare: callback to determine if 2 hash elements are identical
  *     @choose: callback calculating the hash index
@@ -112,8 +121,15 @@ out:
        return ret;
 }
 
-/* removes data from hash, if found. data could be the structure you use with
- * just the key filled, we just need the key for comparing.
+/**
+ * batadv_hash_remove() - Removes data from hash, if found
+ * @hash: hash table
+ * @compare: callback to determine if 2 hash elements are identical
+ * @choose: callback calculating the hash index
+ * @data: data passed to the aforementioned callbacks as argument
+ *
+ * Data could be the structure you use with just the key filled, we just need
+ * the key for comparing.
  *
 * Return: returns pointer to data on success, so you can remove the used
 * structure yourself, or NULL on error
index bded311..8041cf1 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -26,6 +27,7 @@
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/wait.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 
 static struct batadv_socket_client *batadv_socket_client_hash[256];
@@ -55,6 +57,9 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
                                     struct batadv_icmp_header *icmph,
                                     size_t icmp_len);
 
+/**
+ * batadv_socket_init() - Initialize soft interface independent socket data
+ */
 void batadv_socket_init(void)
 {
        memset(batadv_socket_client_hash, 0, sizeof(batadv_socket_client_hash));
@@ -314,6 +319,12 @@ static const struct file_operations batadv_fops = {
        .llseek = no_llseek,
 };
 
+/**
+ * batadv_socket_setup() - Create debugfs "socket" file
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_socket_setup(struct batadv_priv *bat_priv)
 {
        struct dentry *d;
@@ -333,7 +344,7 @@ err:
 }
 
 /**
- * batadv_socket_add_packet - schedule an icmp packet to be sent to
+ * batadv_socket_add_packet() - schedule an icmp packet to be sent to
  *  userspace on an icmp socket.
  * @socket_client: the socket this packet belongs to
  * @icmph: pointer to the header of the icmp packet
@@ -390,7 +401,7 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
 }
 
 /**
- * batadv_socket_receive_packet - schedule an icmp packet to be received
+ * batadv_socket_receive_packet() - schedule an icmp packet to be received
  *  locally and sent to userspace.
  * @icmph: pointer to the header of the icmp packet
  * @icmp_len: total length of the icmp packet
index f3fec40..84cddd0 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
index 4ef4bde..da00498 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -24,6 +25,7 @@
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -86,6 +88,13 @@ static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
        return 0;
 }
 
+/**
+ * batadv_debug_log() - Add debug log entry
+ * @bat_priv: the bat priv with all the soft interface information
+ * @fmt: format string
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 {
        va_list args;
@@ -197,6 +206,12 @@ static const struct file_operations batadv_log_fops = {
        .llseek         = no_llseek,
 };
 
+/**
+ * batadv_debug_log_setup() - Initialize debug log
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debug_log_setup(struct batadv_priv *bat_priv)
 {
        struct dentry *d;
@@ -222,6 +237,10 @@ err:
        return -ENOMEM;
 }
 
+/**
+ * batadv_debug_log_cleanup() - Destroy debug log
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 {
        kfree(bat_priv->debug_log);
index 65ce97e..35e02b2 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -44,25 +45,33 @@ static inline void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 
 /**
  * enum batadv_dbg_level - available log levels
- * @BATADV_DBG_BATMAN: OGM and TQ computations related messages
- * @BATADV_DBG_ROUTES: route added / changed / deleted
- * @BATADV_DBG_TT: translation table messages
- * @BATADV_DBG_BLA: bridge loop avoidance messages
- * @BATADV_DBG_DAT: ARP snooping and DAT related messages
- * @BATADV_DBG_NC: network coding related messages
- * @BATADV_DBG_MCAST: multicast related messages
- * @BATADV_DBG_TP_METER: throughput meter messages
- * @BATADV_DBG_ALL: the union of all the above log levels
  */
 enum batadv_dbg_level {
+       /** @BATADV_DBG_BATMAN: OGM and TQ computations related messages */
        BATADV_DBG_BATMAN       = BIT(0),
+
+       /** @BATADV_DBG_ROUTES: route added / changed / deleted */
        BATADV_DBG_ROUTES       = BIT(1),
+
+       /** @BATADV_DBG_TT: translation table messages */
        BATADV_DBG_TT           = BIT(2),
+
+       /** @BATADV_DBG_BLA: bridge loop avoidance messages */
        BATADV_DBG_BLA          = BIT(3),
+
+       /** @BATADV_DBG_DAT: ARP snooping and DAT related messages */
        BATADV_DBG_DAT          = BIT(4),
+
+       /** @BATADV_DBG_NC: network coding related messages */
        BATADV_DBG_NC           = BIT(5),
+
+       /** @BATADV_DBG_MCAST: multicast related messages */
        BATADV_DBG_MCAST        = BIT(6),
+
+       /** @BATADV_DBG_TP_METER: throughput meter messages */
        BATADV_DBG_TP_METER     = BIT(7),
+
+       /** @BATADV_DBG_ALL: the union of all the above log levels */
        BATADV_DBG_ALL          = 255,
 };
 
@@ -70,7 +79,14 @@ enum batadv_dbg_level {
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 __printf(2, 3);
 
-/* possibly ratelimited debug output */
+/**
+ * _batadv_dbg() - Store debug output with(out) ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ratelimited: whether output should be rate limited
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...)          \
        do {                                                            \
                struct batadv_priv *__batpriv = (bat_priv);             \
@@ -89,11 +105,30 @@ static inline void _batadv_dbg(int type __always_unused,
 }
 #endif
 
+/**
+ * batadv_dbg() - Store debug output without ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg...: format string and variable arguments
+ */
 #define batadv_dbg(type, bat_priv, arg...) \
        _batadv_dbg(type, bat_priv, 0, ## arg)
+
+/**
+ * batadv_dbg_ratelimited() - Store debug output with ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg...: format string and variable arguments
+ */
 #define batadv_dbg_ratelimited(type, bat_priv, arg...) \
        _batadv_dbg(type, bat_priv, 1, ## arg)
 
+/**
+ * batadv_info() - Store message in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define batadv_info(net_dev, fmt, arg...)                              \
        do {                                                            \
                struct net_device *_netdev = (net_dev);                 \
@@ -101,6 +136,13 @@ static inline void _batadv_dbg(int type __always_unused,
                batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg);      \
                pr_info("%s: " fmt, _netdev->name, ## arg);             \
        } while (0)
+
+/**
+ * batadv_err() - Store error in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define batadv_err(net_dev, fmt, arg...)                               \
        do {                                                            \
                struct net_device *_netdev = (net_dev);                 \
index 4daed7a..d31c826 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/crc32c.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
 #include <linux/genetlink.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/init.h>
@@ -45,6 +46,7 @@
 #include <linux/workqueue.h>
 #include <net/dsfield.h>
 #include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -62,7 +64,6 @@
 #include "netlink.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "soft-interface.h"
@@ -139,6 +140,12 @@ static void __exit batadv_exit(void)
        batadv_tt_cache_destroy();
 }
 
+/**
+ * batadv_mesh_init() - Initialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_mesh_init(struct net_device *soft_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -216,6 +223,10 @@ err:
        return ret;
 }
 
+/**
+ * batadv_mesh_free() - Deinitialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ */
 void batadv_mesh_free(struct net_device *soft_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -255,8 +266,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
 }
 
 /**
- * batadv_is_my_mac - check if the given mac address belongs to any of the real
- * interfaces in the current mesh
+ * batadv_is_my_mac() - check if the given mac address belongs to any of the
+ *  real interfaces in the current mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address to check
  *
@@ -286,7 +297,7 @@ bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_seq_print_text_primary_if_get - called from debugfs table printing
+ * batadv_seq_print_text_primary_if_get() - called from debugfs table printing
  *  function that requires the primary interface
  * @seq: debugfs table seq_file struct
  *
@@ -323,7 +334,7 @@ out:
 #endif
 
 /**
- * batadv_max_header_len - calculate maximum encapsulation overhead for a
+ * batadv_max_header_len() - calculate maximum encapsulation overhead for a
  *  payload packet
  *
  * Return: the maximum encapsulation overhead in bytes.
@@ -348,7 +359,7 @@ int batadv_max_header_len(void)
 }
 
 /**
- * batadv_skb_set_priority - sets skb priority according to packet content
+ * batadv_skb_set_priority() - sets skb priority according to packet content
  * @skb: the packet to be sent
  * @offset: offset to the packet content
  *
@@ -412,6 +423,16 @@ static int batadv_recv_unhandled_packet(struct sk_buff *skb,
 /* incoming packets with the batman ethertype received on any active hard
  * interface
  */
+
+/**
+ * batadv_batman_skb_recv() - Handle incoming message from a hard interface
+ * @skb: the received packet
+ * @dev: the net device that the packet was received on
+ * @ptype: packet type of incoming packet (ETH_P_BATMAN)
+ * @orig_dev: the original receive net device (e.g. bonded device)
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype,
                           struct net_device *orig_dev)
@@ -535,6 +556,13 @@ static void batadv_recv_handler_init(void)
        batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
 }
 
+/**
+ * batadv_recv_handler_register() - Register handler for batman-adv packet type
+ * @packet_type: batadv_packettype which should be handled
+ * @recv_handler: receive handler for the packet type
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int
 batadv_recv_handler_register(u8 packet_type,
                             int (*recv_handler)(struct sk_buff *,
@@ -552,13 +580,17 @@ batadv_recv_handler_register(u8 packet_type,
        return 0;
 }
 
+/**
+ * batadv_recv_handler_unregister() - Unregister handler for packet type
+ * @packet_type: batadv_packettype which should no longer be handled
+ */
 void batadv_recv_handler_unregister(u8 packet_type)
 {
        batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
 }
 
 /**
- * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
+ * batadv_skb_crc32() - calculate CRC32 of the whole packet and skip bytes in
  *  the header
  * @skb: skb pointing to fragmented socket buffers
  * @payload_ptr: Pointer to position inside the head buffer of the skb
@@ -591,7 +623,7 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
 }
 
 /**
- * batadv_get_vid - extract the VLAN identifier from skb if any
+ * batadv_get_vid() - extract the VLAN identifier from skb if any
  * @skb: the buffer containing the packet
  * @header_len: length of the batman header preceding the ethernet header
  *
@@ -618,7 +650,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
 }
 
 /**
- * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
+ * batadv_vlan_ap_isola_get() - return AP isolation status for the given vlan
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier for which the AP isolation attributed as to be
  *  looked up
index edb2f23..f7ba3f9 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -24,7 +25,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2017.4"
+#define BATADV_SOURCE_VERSION "2018.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
  */
 #define BATADV_TP_MAX_NUM 5
 
+/**
+ * enum batadv_mesh_state - State of a soft interface
+ */
 enum batadv_mesh_state {
+       /** @BATADV_MESH_INACTIVE: soft interface is not yet running */
        BATADV_MESH_INACTIVE,
+
+       /** @BATADV_MESH_ACTIVE: interface is up and running */
        BATADV_MESH_ACTIVE,
+
+       /** @BATADV_MESH_DEACTIVATING: interface is getting shut down */
        BATADV_MESH_DEACTIVATING,
 };
 
 #define BATADV_BCAST_QUEUE_LEN         256
 #define BATADV_BATMAN_QUEUE_LEN        256
 
+/**
+ * enum batadv_uev_action - action type of uevent
+ */
 enum batadv_uev_action {
+       /** @BATADV_UEV_ADD: gateway was selected (after none was selected) */
        BATADV_UEV_ADD = 0,
+
+       /**
+        * @BATADV_UEV_DEL: selected gateway was removed and none is selected
+        * anymore
+        */
        BATADV_UEV_DEL,
+
+       /**
+        * @BATADV_UEV_CHANGE: a different gateway was selected as best gateway
+        */
        BATADV_UEV_CHANGE,
+
+       /**
+        * @BATADV_UEV_LOOPDETECT: loop was detected which cannot be handled by
+        * bridge loop avoidance
+        */
        BATADV_UEV_LOOPDETECT,
 };
 
+/**
+ * enum batadv_uev_type - Type of uevent
+ */
 enum batadv_uev_type {
+       /** @BATADV_UEV_GW: selected gateway was modified */
        BATADV_UEV_GW = 0,
+
+       /** @BATADV_UEV_BLA: bridge loop avoidance event */
        BATADV_UEV_BLA,
 };
 
@@ -184,16 +217,14 @@ enum batadv_uev_type {
 
 /* Kernel headers */
 
-#include <linux/bitops.h> /* for packet.h */
 #include <linux/compiler.h>
 #include <linux/etherdevice.h>
-#include <linux/if_ether.h> /* for packet.h */
 #include <linux/if_vlan.h>
 #include <linux/jiffies.h>
 #include <linux/percpu.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
-#include "packet.h"
 #include "types.h"
 
 struct net_device;
@@ -202,7 +233,7 @@ struct seq_file;
 struct sk_buff;
 
 /**
- * batadv_print_vid - return printable version of vid information
+ * batadv_print_vid() - return printable version of vid information
  * @vid: the VLAN identifier
  *
  * Return: -1 when no VLAN is used, VLAN id otherwise
@@ -238,7 +269,7 @@ void batadv_recv_handler_unregister(u8 packet_type);
 __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
 
 /**
- * batadv_compare_eth - Compare two not u16 aligned Ethernet addresses
+ * batadv_compare_eth() - Compare two not u16 aligned Ethernet addresses
  * @data1: Pointer to a six-byte array containing the Ethernet address
  * @data2: Pointer other six-byte array containing the Ethernet address
  *
@@ -252,7 +283,7 @@ static inline bool batadv_compare_eth(const void *data1, const void *data2)
 }
 
 /**
- * batadv_has_timed_out - compares current time (jiffies) and timestamp +
+ * batadv_has_timed_out() - compares current time (jiffies) and timestamp +
  *  timeout
  * @timestamp:         base value to compare with (in jiffies)
  * @timeout:           added to base value before comparing (in milliseconds)
@@ -265,40 +296,96 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
        return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
 }
 
+/**
+ * batadv_atomic_dec_not_zero() - Decrease unless the number is 0
+ * @v: pointer of type atomic_t
+ *
+ * Return: non-zero if v was not 0, and zero otherwise.
+ */
 #define batadv_atomic_dec_not_zero(v)  atomic_add_unless((v), -1, 0)
 
-/* Returns the smallest signed integer in two's complement with the sizeof x */
+/**
+ * batadv_smallest_signed_int() - Returns the smallest signed integer in two's
+ *  complement with the sizeof x
+ * @x: type of integer
+ *
+ * Return: smallest signed integer of type
+ */
 #define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
 
-/* Checks if a sequence number x is a predecessor/successor of y.
- * they handle overflows/underflows and can correctly check for a
- * predecessor/successor unless the variable sequence number has grown by
- * more then 2**(bitwidth(x)-1)-1.
+/**
+ * batadv_seq_before() - Checks if a sequence number x is a predecessor of y
+ * @x: potential predecessor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a predecessor
+ * unless the variable sequence number has grown by more then
+ * 2**(bitwidth(x)-1)-1.
+ *
  * This means that for a u8 with the maximum value 255, it would think:
- *  - when adding nothing - it is neither a predecessor nor a successor
- *  - before adding more than 127 to the starting value - it is a predecessor,
- *  - when adding 128 - it is neither a predecessor nor a successor,
- *  - after adding more than 127 to the starting value - it is a successor
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a predecessor of y, false otherwise
  */
 #define batadv_seq_before(x, y) ({typeof(x)_d1 = (x); \
                                 typeof(y)_d2 = (y); \
                                 typeof(x)_dummy = (_d1 - _d2); \
                                 (void)(&_d1 == &_d2); \
                                 _dummy > batadv_smallest_signed_int(_dummy); })
+
+/**
+ * batadv_seq_after() - Checks if a sequence number x is a successor of y
+ * @x: potential successor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a successor
+ * unless the variable sequence number has grown by more then
+ * 2**(bitwidth(x)-1)-1.
+ *
+ * This means that for a u8 with the maximum value 255, it would think:
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a successor of y, false otherwise
+ */
 #define batadv_seq_after(x, y) batadv_seq_before(y, x)
 
-/* Stop preemption on local cpu while incrementing the counter */
+/**
+ * batadv_add_counter() - Add to per cpu statistics counter of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @idx: counter index which should be modified
+ * @count: value to increase counter by
+ *
+ * Stop preemption on local cpu while incrementing the counter
+ */
 static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
                                      size_t count)
 {
        this_cpu_add(bat_priv->bat_counters[idx], count);
 }
 
+/**
+ * batadv_inc_counter() - Increase per cpu statistics counter of soft interface
+ * @b: the bat priv with all the soft interface information
+ * @i: counter index which should be modified
+ */
 #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
 
-/* Define a macro to reach the control buffer of the skb. The members of the
- * control buffer are defined in struct batadv_skb_cb in types.h.
- * The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
+/**
+ * BATADV_SKB_CB() - Get batadv_skb_cb from skb control buffer
+ * @__skb: skb holding the control buffer
+ *
+ * The members of the control buffer are defined in struct batadv_skb_cb in
+ * types.h. The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
+ *
+ * Return: pointer to the batadv_skb_cb of the skb
  */
 #define BATADV_SKB_CB(__skb)       ((struct batadv_skb_cb *)&((__skb)->cb[0]))
 
index e553a87..cbdeb47 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2014-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
@@ -24,7 +25,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/icmpv6.h>
 #include <linux/if_bridge.h>
 #include <linux/if_ether.h>
 #include <net/if_inet6.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
-#include "packet.h"
 #include "translation-table.h"
 #include "tvlv.h"
 
 static void batadv_mcast_mla_update(struct work_struct *work);
 
 /**
- * batadv_mcast_start_timer - schedule the multicast periodic worker
+ * batadv_mcast_start_timer() - schedule the multicast periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
@@ -75,7 +76,7 @@ static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_get_bridge - get the bridge on top of the softif if it exists
+ * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
  * @soft_iface: netdev struct of the mesh interface
  *
  * If the given soft interface has a bridge on top then the refcount
@@ -101,7 +102,7 @@ static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
 }
 
 /**
- * batadv_mcast_mla_softif_get - get softif multicast listeners
+ * batadv_mcast_mla_softif_get() - get softif multicast listeners
  * @dev: the device to collect multicast addresses from
  * @mcast_list: a list to put found addresses into
  *
@@ -147,7 +148,7 @@ static int batadv_mcast_mla_softif_get(struct net_device *dev,
 }
 
 /**
- * batadv_mcast_mla_is_duplicate - check whether an address is in a list
+ * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
  * @mcast_addr: the multicast address to check
  * @mcast_list: the list with multicast addresses to search in
  *
@@ -167,7 +168,7 @@ static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
 }
 
 /**
- * batadv_mcast_mla_br_addr_cpy - copy a bridge multicast address
+ * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
  * @dst: destination to write to - a multicast MAC address
  * @src: source to read from - a multicast IP address
  *
@@ -191,7 +192,7 @@ static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
 }
 
 /**
- * batadv_mcast_mla_bridge_get - get bridged-in multicast listeners
+ * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
  * @dev: a bridge slave whose bridge to collect multicast addresses from
  * @mcast_list: a list to put found addresses into
  *
@@ -244,7 +245,7 @@ out:
 }
 
 /**
- * batadv_mcast_mla_list_free - free a list of multicast addresses
+ * batadv_mcast_mla_list_free() - free a list of multicast addresses
  * @mcast_list: the list to free
  *
  * Removes and frees all items in the given mcast_list.
@@ -261,7 +262,7 @@ static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
 }
 
 /**
- * batadv_mcast_mla_tt_retract - clean up multicast listener announcements
+ * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
  * @bat_priv: the bat priv with all the soft interface information
  * @mcast_list: a list of addresses which should _not_ be removed
  *
@@ -297,7 +298,7 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_mla_tt_add - add multicast listener announcements
+ * batadv_mcast_mla_tt_add() - add multicast listener announcements
  * @bat_priv: the bat priv with all the soft interface information
  * @mcast_list: a list of addresses which are going to get added
  *
@@ -333,7 +334,7 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_has_bridge - check whether the soft-iface is bridged
+ * batadv_mcast_has_bridge() - check whether the soft-iface is bridged
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Checks whether there is a bridge on top of our soft interface.
@@ -354,7 +355,8 @@ static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_querier_log - debug output regarding the querier status on link
+ * batadv_mcast_querier_log() - debug output regarding the querier status on
+ *  link
  * @bat_priv: the bat priv with all the soft interface information
  * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
  * @old_state: the previous querier state on our link
@@ -405,7 +407,8 @@ batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
 }
 
 /**
- * batadv_mcast_bridge_log - debug output for topology changes in bridged setups
+ * batadv_mcast_bridge_log() - debug output for topology changes in bridged
+ *  setups
  * @bat_priv: the bat priv with all the soft interface information
  * @bridged: a flag about whether the soft interface is currently bridged or not
  * @querier_ipv4: (maybe) new status of a potential, selected IGMP querier
@@ -444,7 +447,7 @@ batadv_mcast_bridge_log(struct batadv_priv *bat_priv, bool bridged,
 }
 
 /**
- * batadv_mcast_flags_logs - output debug information about mcast flag changes
+ * batadv_mcast_flags_log() - output debug information about mcast flag changes
  * @bat_priv: the bat priv with all the soft interface information
  * @flags: flags indicating the new multicast state
  *
@@ -470,7 +473,7 @@ static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
 }
 
 /**
- * batadv_mcast_mla_tvlv_update - update multicast tvlv
+ * batadv_mcast_mla_tvlv_update() - update multicast tvlv
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Updates the own multicast tvlv with our current multicast related settings,
@@ -545,7 +548,7 @@ update:
 }
 
 /**
- * __batadv_mcast_mla_update - update the own MLAs
+ * __batadv_mcast_mla_update() - update the own MLAs
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Updates the own multicast listener announcements in the translation
@@ -582,7 +585,7 @@ out:
 }
 
 /**
- * batadv_mcast_mla_update - update the own MLAs
+ * batadv_mcast_mla_update() - update the own MLAs
  * @work: kernel work struct
  *
  * Updates the own multicast listener announcements in the translation
@@ -605,7 +608,7 @@ static void batadv_mcast_mla_update(struct work_struct *work)
 }
 
 /**
- * batadv_mcast_is_report_ipv4 - check for IGMP reports
+ * batadv_mcast_is_report_ipv4() - check for IGMP reports
  * @skb: the ethernet frame destined for the mesh
  *
  * This call might reallocate skb data.
@@ -630,7 +633,8 @@ static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
 }
 
 /**
- * batadv_mcast_forw_mode_check_ipv4 - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
+ *  potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the IPv4 packet to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -671,7 +675,7 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_is_report_ipv6 - check for MLD reports
+ * batadv_mcast_is_report_ipv6() - check for MLD reports
  * @skb: the ethernet frame destined for the mesh
  *
  * This call might reallocate skb data.
@@ -695,7 +699,8 @@ static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
 }
 
 /**
- * batadv_mcast_forw_mode_check_ipv6 - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
+ *  potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the IPv6 packet to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -736,7 +741,7 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_mode_check - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the multicast frame to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -774,7 +779,7 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_want_all_ip_count - count nodes with unspecific mcast
+ * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
  *  interest
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: ethernet header of a packet
@@ -798,7 +803,7 @@ static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_tt_node_get - get a multicast tt node
+ * batadv_mcast_forw_tt_node_get() - get a multicast tt node
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the ether header containing the multicast destination
  *
@@ -814,7 +819,7 @@ batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_ipv4_node_get - get a node with an ipv4 flag
+ * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
@@ -841,7 +846,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_ipv6_node_get - get a node with an ipv6 flag
+ * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
@@ -868,7 +873,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_ip_node_get - get a node with an ipv4/ipv6 flag
+ * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: an ethernet header to determine the protocol family from
  *
@@ -892,7 +897,7 @@ batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_unsnoop_node_get - get a node with an unsnoopable flag
+ * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
@@ -919,7 +924,7 @@ batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_mode - check on how to forward a multicast packet
+ * batadv_mcast_forw_mode() - check on how to forward a multicast packet
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: The multicast packet to check
  * @orig: an originator to be set to forward the skb to
@@ -973,7 +978,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_mcast_want_unsnoop_update - update unsnoop counter and list
+ * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1018,7 +1023,7 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_want_ipv4_update - update want-all-ipv4 counter and list
+ * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1063,7 +1068,7 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_want_ipv6_update - update want-all-ipv6 counter and list
+ * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1108,7 +1113,7 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_tvlv_ogm_handler - process incoming multicast tvlv container
+ * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -1164,7 +1169,7 @@ static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_init - initialize the multicast optimizations structures
+ * batadv_mcast_init() - initialize the multicast optimizations structures
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_mcast_init(struct batadv_priv *bat_priv)
@@ -1179,7 +1184,7 @@ void batadv_mcast_init(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_mcast_flags_print_header - print own mcast flags to debugfs table
+ * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  *
@@ -1220,7 +1225,7 @@ static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_flags_seq_print_text - print the mcast flags of other nodes
+ * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -1281,7 +1286,7 @@ int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_mcast_free - free the multicast optimizations structures
+ * batadv_mcast_free() - free the multicast optimizations structures
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_mcast_free(struct batadv_priv *bat_priv)
@@ -1296,7 +1301,7 @@ void batadv_mcast_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_purge_orig - reset originator global mcast state modifications
+ * batadv_mcast_purge_orig() - reset originator global mcast state modifications
  * @orig: the originator which is going to get purged
  */
 void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
index 2a78cdd..3ac0633 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2014-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
@@ -25,15 +26,21 @@ struct sk_buff;
 
 /**
  * enum batadv_forw_mode - the way a packet should be forwarded as
- * @BATADV_FORW_ALL: forward the packet to all nodes (currently via classic
- *  flooding)
- * @BATADV_FORW_SINGLE: forward the packet to a single node (currently via the
- *  BATMAN unicast routing protocol)
- * @BATADV_FORW_NONE: don't forward, drop it
  */
 enum batadv_forw_mode {
+       /**
+        * @BATADV_FORW_ALL: forward the packet to all nodes (currently via
+        *  classic flooding)
+        */
        BATADV_FORW_ALL,
+
+       /**
+        * @BATADV_FORW_SINGLE: forward the packet to a single node (currently
+        *  via the BATMAN unicast routing protocol)
+        */
        BATADV_FORW_SINGLE,
+
+       /** @BATADV_FORW_NONE: don't forward, drop it */
        BATADV_FORW_NONE,
 };
 
index ab13b4d..a823d38 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
@@ -23,8 +24,8 @@
 #include <linux/cache.h>
 #include <linux/errno.h>
 #include <linux/export.h>
-#include <linux/fs.h>
 #include <linux/genetlink.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -39,6 +40,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -46,7 +48,6 @@
 #include "gateway_client.h"
 #include "hard-interface.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "tp_meter.h"
 #include "translation-table.h"
@@ -99,7 +100,7 @@ static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
 };
 
 /**
- * batadv_netlink_get_ifindex - Extract an interface index from a message
+ * batadv_netlink_get_ifindex() - Extract an interface index from a message
  * @nlh: Message header
  * @attrtype: Attribute which holds an interface index
  *
@@ -114,7 +115,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
 }
 
 /**
- * batadv_netlink_mesh_info_put - fill in generic information about mesh
+ * batadv_netlink_mesh_info_put() - fill in generic information about mesh
  *  interface
  * @msg: netlink message to be sent back
  * @soft_iface: interface for which the data should be taken
@@ -169,7 +170,7 @@ batadv_netlink_mesh_info_put(struct sk_buff *msg, struct net_device *soft_iface)
 }
 
 /**
- * batadv_netlink_get_mesh_info - handle incoming BATADV_CMD_GET_MESH_INFO
+ * batadv_netlink_get_mesh_info() - handle incoming BATADV_CMD_GET_MESH_INFO
  *  netlink request
  * @skb: received netlink message
  * @info: receiver information
@@ -230,7 +231,7 @@ batadv_netlink_get_mesh_info(struct sk_buff *skb, struct genl_info *info)
 }
 
 /**
- * batadv_netlink_tp_meter_put - Fill information of started tp_meter session
+ * batadv_netlink_tp_meter_put() - Fill information of started tp_meter session
  * @msg: netlink message to be sent back
  * @cookie: tp meter session cookie
  *
@@ -246,7 +247,7 @@ batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie)
 }
 
 /**
- * batadv_netlink_tpmeter_notify - send tp_meter result via netlink to client
+ * batadv_netlink_tpmeter_notify() - send tp_meter result via netlink to client
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: destination of tp_meter session
  * @result: reason for tp meter session stop
@@ -309,7 +310,7 @@ err_genlmsg:
 }
 
 /**
- * batadv_netlink_tp_meter_start - Start a new tp_meter session
+ * batadv_netlink_tp_meter_start() - Start a new tp_meter session
  * @skb: received netlink message
  * @info: receiver information
  *
@@ -386,7 +387,7 @@ batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
 }
 
 /**
- * batadv_netlink_tp_meter_start - Cancel a running tp_meter session
+ * batadv_netlink_tp_meter_start() - Cancel a running tp_meter session
  * @skb: received netlink message
  * @info: receiver information
  *
@@ -431,7 +432,7 @@ out:
 }
 
 /**
- * batadv_netlink_dump_hardif_entry - Dump one hard interface into a message
+ * batadv_netlink_dump_hardif_entry() - Dump one hard interface into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -473,7 +474,7 @@ batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_netlink_dump_hardifs - Dump all hard interface into a messages
+ * batadv_netlink_dump_hardifs() - Dump all hard interface into a messages
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -620,7 +621,7 @@ struct genl_family batadv_netlink_family __ro_after_init = {
 };
 
 /**
- * batadv_netlink_register - register batadv genl netlink family
+ * batadv_netlink_register() - register batadv genl netlink family
  */
 void __init batadv_netlink_register(void)
 {
@@ -632,7 +633,7 @@ void __init batadv_netlink_register(void)
 }
 
 /**
- * batadv_netlink_unregister - unregister batadv genl netlink family
+ * batadv_netlink_unregister() - unregister batadv genl netlink family
  */
 void batadv_netlink_unregister(void)
 {
index f1cd8c5..0e7e57b 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
index 3604d78..b48116b 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
@@ -25,7 +26,7 @@
 #include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_packet.h>
 #include <linux/init.h>
@@ -35,6 +36,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
 #include <linux/random.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "tvlv.h"
@@ -65,7 +67,7 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
 
 /**
- * batadv_nc_init - one-time initialization for network coding
+ * batadv_nc_init() - one-time initialization for network coding
  *
  * Return: 0 on success or negative error number in case of failure
  */
@@ -81,7 +83,7 @@ int __init batadv_nc_init(void)
 }
 
 /**
- * batadv_nc_start_timer - initialise the nc periodic worker
+ * batadv_nc_start_timer() - initialise the nc periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
@@ -91,7 +93,7 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_tvlv_container_update - update the network coding tvlv container
+ * batadv_nc_tvlv_container_update() - update the network coding tvlv container
  *  after network coding setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -113,7 +115,7 @@ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_status_update - update the network coding tvlv container after
+ * batadv_nc_status_update() - update the network coding tvlv container after
  *  network coding setting change
  * @net_dev: the soft interface net device
  */
@@ -125,7 +127,7 @@ void batadv_nc_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_nc_tvlv_ogm_handler_v1 - process incoming nc tvlv container
+ * batadv_nc_tvlv_ogm_handler_v1() - process incoming nc tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -144,7 +146,7 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_mesh_init - initialise coding hash table and start house keeping
+ * batadv_nc_mesh_init() - initialise coding hash table and start house keeping
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure
@@ -185,7 +187,7 @@ err:
 }
 
 /**
- * batadv_nc_init_bat_priv - initialise the nc specific bat_priv variables
+ * batadv_nc_init_bat_priv() - initialise the nc specific bat_priv variables
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
@@ -197,7 +199,7 @@ void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_init_orig - initialise the nc fields of an orig_node
+ * batadv_nc_init_orig() - initialise the nc fields of an orig_node
  * @orig_node: the orig_node which is going to be initialised
  */
 void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
@@ -209,8 +211,8 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_nc_node_release - release nc_node from lists and queue for free after
- *  rcu grace period
+ * batadv_nc_node_release() - release nc_node from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the nc_node
  */
 static void batadv_nc_node_release(struct kref *ref)
@@ -224,7 +226,7 @@ static void batadv_nc_node_release(struct kref *ref)
 }
 
 /**
- * batadv_nc_node_put - decrement the nc_node refcounter and possibly
+ * batadv_nc_node_put() - decrement the nc_node refcounter and possibly
  *  release it
  * @nc_node: nc_node to be free'd
  */
@@ -234,8 +236,8 @@ static void batadv_nc_node_put(struct batadv_nc_node *nc_node)
 }
 
 /**
- * batadv_nc_path_release - release nc_path from lists and queue for free after
- *  rcu grace period
+ * batadv_nc_path_release() - release nc_path from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the nc_path
  */
 static void batadv_nc_path_release(struct kref *ref)
@@ -248,7 +250,7 @@ static void batadv_nc_path_release(struct kref *ref)
 }
 
 /**
- * batadv_nc_path_put - decrement the nc_path refcounter and possibly
+ * batadv_nc_path_put() - decrement the nc_path refcounter and possibly
  *  release it
  * @nc_path: nc_path to be free'd
  */
@@ -258,7 +260,7 @@ static void batadv_nc_path_put(struct batadv_nc_path *nc_path)
 }
 
 /**
- * batadv_nc_packet_free - frees nc packet
+ * batadv_nc_packet_free() - frees nc packet
  * @nc_packet: the nc packet to free
  * @dropped: whether the packet is freed because it is dropped
  */
@@ -275,7 +277,7 @@ static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet,
 }
 
 /**
- * batadv_nc_to_purge_nc_node - checks whether an nc node has to be purged
+ * batadv_nc_to_purge_nc_node() - checks whether an nc node has to be purged
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_node: the nc node to check
  *
@@ -291,7 +293,7 @@ static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_to_purge_nc_path_coding - checks whether an nc path has timed out
+ * batadv_nc_to_purge_nc_path_coding() - checks whether an nc path has timed out
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
@@ -311,7 +313,8 @@ static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_to_purge_nc_path_decoding - checks whether an nc path has timed out
+ * batadv_nc_to_purge_nc_path_decoding() - checks whether an nc path has timed
+ *  out
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
@@ -331,7 +334,7 @@ static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig_nc_nodes - go through list of nc nodes and purge stale
+ * batadv_nc_purge_orig_nc_nodes() - go through list of nc nodes and purge stale
  *  entries
  * @bat_priv: the bat priv with all the soft interface information
  * @list: list of nc nodes
@@ -369,7 +372,7 @@ batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig - purges all nc node data attached of the given
+ * batadv_nc_purge_orig() - purges all nc node data attached of the given
  *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig_node with the nc node entries to be purged
@@ -395,8 +398,8 @@ void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig_hash - traverse entire originator hash to check if they
- *  have timed out nc nodes
+ * batadv_nc_purge_orig_hash() - traverse entire originator hash to check if
+ *  they have timed out nc nodes
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
@@ -422,7 +425,7 @@ static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_purge_paths - traverse all nc paths part of the hash and remove
+ * batadv_nc_purge_paths() - traverse all nc paths part of the hash and remove
  *  unused ones
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the nc paths to check
@@ -481,7 +484,7 @@ static void batadv_nc_purge_paths(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_hash_key_gen - computes the nc_path hash key
+ * batadv_nc_hash_key_gen() - computes the nc_path hash key
  * @key: buffer to hold the final hash key
  * @src: source ethernet mac address going into the hash key
  * @dst: destination ethernet mac address going into the hash key
@@ -494,7 +497,7 @@ static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
 }
 
 /**
- * batadv_nc_hash_choose - compute the hash value for an nc path
+ * batadv_nc_hash_choose() - compute the hash value for an nc path
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -512,7 +515,7 @@ static u32 batadv_nc_hash_choose(const void *data, u32 size)
 }
 
 /**
- * batadv_nc_hash_compare - comparing function used in the network coding hash
+ * batadv_nc_hash_compare() - comparing function used in the network coding hash
  *  tables
  * @node: node in the local table
  * @data2: second object to compare the node to
@@ -538,7 +541,7 @@ static bool batadv_nc_hash_compare(const struct hlist_node *node,
 }
 
 /**
- * batadv_nc_hash_find - search for an existing nc path and return it
+ * batadv_nc_hash_find() - search for an existing nc path and return it
  * @hash: hash table containing the nc path
  * @data: search key
  *
@@ -575,7 +578,7 @@ batadv_nc_hash_find(struct batadv_hashtable *hash,
 }
 
 /**
- * batadv_nc_send_packet - send non-coded packet and free nc_packet struct
+ * batadv_nc_send_packet() - send non-coded packet and free nc_packet struct
  * @nc_packet: the nc packet to send
  */
 static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
@@ -586,7 +589,7 @@ static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
 }
 
 /**
- * batadv_nc_sniffed_purge - Checks timestamp of given sniffed nc_packet.
+ * batadv_nc_sniffed_purge() - Checks timestamp of given sniffed nc_packet.
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path the packet belongs to
  * @nc_packet: the nc packet to be checked
@@ -625,7 +628,7 @@ out:
 }
 
 /**
- * batadv_nc_fwd_flush - Checks the timestamp of the given nc packet.
+ * batadv_nc_fwd_flush() - Checks the timestamp of the given nc packet.
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path the packet belongs to
  * @nc_packet: the nc packet to be checked
@@ -663,8 +666,8 @@ static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_process_nc_paths - traverse given nc packet pool and free timed out
- *  nc packets
+ * batadv_nc_process_nc_paths() - traverse given nc packet pool and free timed
+ *  out nc packets
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: to be processed hash table
  * @process_fn: Function called to process given nc packet. Should return true
@@ -709,7 +712,8 @@ batadv_nc_process_nc_paths(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_worker - periodic task for house keeping related to network coding
+ * batadv_nc_worker() - periodic task for house keeping related to network
+ *  coding
  * @work: kernel work struct
  */
 static void batadv_nc_worker(struct work_struct *work)
@@ -749,8 +753,8 @@ static void batadv_nc_worker(struct work_struct *work)
 }
 
 /**
- * batadv_can_nc_with_orig - checks whether the given orig node is suitable for
- *  coding or not
+ * batadv_can_nc_with_orig() - checks whether the given orig node is suitable
+ *  for coding or not
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: neighboring orig node which may be used as nc candidate
  * @ogm_packet: incoming ogm packet also used for the checks
@@ -790,7 +794,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_find_nc_node - search for an existing nc node and return it
+ * batadv_nc_find_nc_node() - search for an existing nc node and return it
  * @orig_node: orig node originating the ogm packet
  * @orig_neigh_node: neighboring orig node from which we received the ogm packet
  *  (can be equal to orig_node)
@@ -830,7 +834,7 @@ batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_nc_get_nc_node - retrieves an nc node or creates the entry if it was
+ * batadv_nc_get_nc_node() - retrieves an nc node or creates the entry if it was
  *  not found
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node originating the ogm packet
@@ -890,7 +894,7 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node
+ * batadv_nc_update_nc_node() - updates stored incoming and outgoing nc node
  *  structs (best called on incoming OGMs)
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node originating the ogm packet
@@ -945,7 +949,7 @@ out:
 }
 
 /**
- * batadv_nc_get_path - get existing nc_path or allocate a new one
+ * batadv_nc_get_path() - get existing nc_path or allocate a new one
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the nc path
  * @src: ethernet source address - first half of the nc path search key
@@ -1006,7 +1010,7 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_random_weight_tq - scale the receivers TQ-value to avoid unfair
+ * batadv_nc_random_weight_tq() - scale the receivers TQ-value to avoid unfair
  *  selection of a receiver with slightly lower TQ than the other
  * @tq: to be weighted tq value
  *
@@ -1029,7 +1033,7 @@ static u8 batadv_nc_random_weight_tq(u8 tq)
 }
 
 /**
- * batadv_nc_memxor - XOR destination with source
+ * batadv_nc_memxor() - XOR destination with source
  * @dst: byte array to XOR into
  * @src: byte array to XOR from
  * @len: length of destination array
@@ -1043,7 +1047,7 @@ static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
 }
 
 /**
- * batadv_nc_code_packets - code a received unicast_packet with an nc packet
+ * batadv_nc_code_packets() - code a received unicast_packet with an nc packet
  *  into a coded_packet and send it
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to forward
@@ -1236,7 +1240,7 @@ out:
 }
 
 /**
- * batadv_nc_skb_coding_possible - true if a decoded skb is available at dst.
+ * batadv_nc_skb_coding_possible() - true if a decoded skb is available at dst.
  * @skb: data skb to forward
  * @dst: destination mac address of the other skb to code with
  * @src: source mac address of skb
@@ -1260,7 +1264,7 @@ static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
 }
 
 /**
- * batadv_nc_path_search - Find the coding path matching in_nc_node and
+ * batadv_nc_path_search() - Find the coding path matching in_nc_node and
  *  out_nc_node to retrieve a buffered packet that can be used for coding.
  * @bat_priv: the bat priv with all the soft interface information
  * @in_nc_node: pointer to skb next hop's neighbor nc node
@@ -1328,8 +1332,8 @@ batadv_nc_path_search(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_src_search - Loops through the list of neighoring nodes of the
- *  skb's sender (may be equal to the originator).
+ * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of
+ *  the skb's sender (may be equal to the originator).
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to forward
  * @eth_dst: next hop mac address of skb
@@ -1374,7 +1378,7 @@ batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_store_before_coding - set the ethernet src and dst of the
+ * batadv_nc_skb_store_before_coding() - set the ethernet src and dst of the
  *  unicast skb before it is stored for use in later decoding
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to store
@@ -1409,7 +1413,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_dst_search - Loops through list of neighboring nodes to dst.
+ * batadv_nc_skb_dst_search() - Loops through list of neighboring nodes to dst.
  * @skb: data skb to forward
  * @neigh_node: next hop to forward packet to
  * @ethhdr: pointer to the ethernet header inside the skb
@@ -1467,7 +1471,7 @@ static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_skb_add_to_path - buffer skb for later encoding / decoding
+ * batadv_nc_skb_add_to_path() - buffer skb for later encoding / decoding
  * @skb: skb to add to path
  * @nc_path: path to add skb to
  * @neigh_node: next hop to forward packet to
@@ -1502,7 +1506,7 @@ static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_skb_forward - try to code a packet or add it to the coding packet
+ * batadv_nc_skb_forward() - try to code a packet or add it to the coding packet
  *  buffer
  * @skb: data skb to forward
  * @neigh_node: next hop to forward packet to
@@ -1559,8 +1563,8 @@ out:
 }
 
 /**
- * batadv_nc_skb_store_for_decoding - save a clone of the skb which can be used
- *  when decoding coded packets
+ * batadv_nc_skb_store_for_decoding() - save a clone of the skb which can be
+ *  used when decoding coded packets
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to store
  */
@@ -1620,7 +1624,7 @@ out:
 }
 
 /**
- * batadv_nc_skb_store_sniffed_unicast - check if a received unicast packet
+ * batadv_nc_skb_store_sniffed_unicast() - check if a received unicast packet
  *  should be saved in the decoding buffer and, if so, store it there
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: unicast skb to store
@@ -1640,7 +1644,7 @@ void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_decode_packet - decode given skb using the decode data stored
+ * batadv_nc_skb_decode_packet() - decode given skb using the decode data stored
  *  in nc_packet
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: unicast skb to decode
@@ -1734,7 +1738,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_find_decoding_packet - search through buffered decoding data to
+ * batadv_nc_find_decoding_packet() - search through buffered decoding data to
  *  find the data needed to decode the coded packet
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: pointer to the ethernet header inside the coded packet
@@ -1799,7 +1803,7 @@ batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_recv_coded_packet - try to decode coded packet and enqueue the
+ * batadv_nc_recv_coded_packet() - try to decode coded packet and enqueue the
  *  resulting unicast packet
  * @skb: incoming coded packet
  * @recv_if: pointer to interface this packet was received on
@@ -1874,7 +1878,7 @@ free_skb:
 }
 
 /**
- * batadv_nc_mesh_free - clean up network coding memory
+ * batadv_nc_mesh_free() - clean up network coding memory
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
@@ -1891,7 +1895,7 @@ void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_nc_nodes_seq_print_text - print the nc node information
+ * batadv_nc_nodes_seq_print_text() - print the nc node information
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -1954,7 +1958,7 @@ out:
 }
 
 /**
- * batadv_nc_init_debugfs - create nc folder and related files in debugfs
+ * batadv_nc_init_debugfs() - create nc folder and related files in debugfs
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure
index c66efb8..adaeafa 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
index 2967b86..58a7d92 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -21,7 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
+#include <linux/rcupdate.h>
 #include <linux/seq_file.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/stddef.h>
 #include <linux/workqueue.h>
 #include <net/sock.h>
 #include <uapi/linux/batman_adv.h>
 /* hash class keys */
 static struct lock_class_key batadv_orig_hash_lock_class_key;
 
+/**
+ * batadv_orig_hash_find() - Find and return originator from orig_hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @data: mac address of the originator
+ *
+ * Return: orig_node (with increased refcnt), NULL on errors
+ */
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
+{
+       struct batadv_hashtable *hash = bat_priv->orig_hash;
+       struct hlist_head *head;
+       struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
+       int index;
+
+       /* orig_hash can be NULL (e.g. before mesh init) - nothing to find */
+       if (!hash)
+               return NULL;
+
+       index = batadv_choose_orig(data, hash->size);
+       head = &hash->table[index];
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+               if (!batadv_compare_eth(orig_node, data))
+                       continue;
+
+               /* skip entries whose refcount already dropped to zero;
+                * kref_get_unless_zero() only succeeds on live objects
+                */
+               if (!kref_get_unless_zero(&orig_node->refcount))
+                       continue;
+
+               orig_node_tmp = orig_node;
+               break;
+       }
+       rcu_read_unlock();
+
+       return orig_node_tmp;
+}
+
 static void batadv_purge_orig(struct work_struct *work);
 
 /**
- * batadv_compare_orig - comparing function used in the originator hash table
+ * batadv_compare_orig() - comparing function used in the originator hash table
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
@@ -73,7 +113,7 @@ bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_orig_node_vlan_get - get an orig_node_vlan object
+ * batadv_orig_node_vlan_get() - get an orig_node_vlan object
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
  *
@@ -104,7 +144,7 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
+ * batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
  *  object
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
@@ -145,7 +185,7 @@ out:
 }
 
 /**
- * batadv_orig_node_vlan_release - release originator-vlan object from lists
+ * batadv_orig_node_vlan_release() - release originator-vlan object from lists
  *  and queue for free after rcu grace period
  * @ref: kref pointer of the originator-vlan object
  */
@@ -159,7 +199,7 @@ static void batadv_orig_node_vlan_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_node_vlan_put - decrement the refcounter and possibly release
+ * batadv_orig_node_vlan_put() - decrement the refcounter and possibly release
  *  the originator-vlan object
  * @orig_vlan: the originator-vlan object to release
  */
@@ -168,6 +208,12 @@ void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan)
        kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
 }
 
+/**
+ * batadv_originator_init() - Initialize all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_originator_init(struct batadv_priv *bat_priv)
 {
        if (bat_priv->orig_hash)
@@ -193,7 +239,7 @@ err:
 }
 
 /**
- * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
+ * batadv_neigh_ifinfo_release() - release neigh_ifinfo from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the neigh_ifinfo
  */
@@ -210,7 +256,7 @@ static void batadv_neigh_ifinfo_release(struct kref *ref)
 }
 
 /**
- * batadv_neigh_ifinfo_put - decrement the refcounter and possibly release
+ * batadv_neigh_ifinfo_put() - decrement the refcounter and possibly release
  *  the neigh_ifinfo
  * @neigh_ifinfo: the neigh_ifinfo object to release
  */
@@ -220,7 +266,7 @@ void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo)
 }
 
 /**
- * batadv_hardif_neigh_release - release hardif neigh node from lists and
+ * batadv_hardif_neigh_release() - release hardif neigh node from lists and
  *  queue for free after rcu grace period
  * @ref: kref pointer of the neigh_node
  */
@@ -240,7 +286,7 @@ static void batadv_hardif_neigh_release(struct kref *ref)
 }
 
 /**
- * batadv_hardif_neigh_put - decrement the hardif neighbors refcounter
+ * batadv_hardif_neigh_put() - decrement the hardif neighbors refcounter
  *  and possibly release it
  * @hardif_neigh: hardif neigh neighbor to free
  */
@@ -250,7 +296,7 @@ void batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh)
 }
 
 /**
- * batadv_neigh_node_release - release neigh_node from lists and queue for
+ * batadv_neigh_node_release() - release neigh_node from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the neigh_node
  */
@@ -275,7 +321,7 @@ static void batadv_neigh_node_release(struct kref *ref)
 }
 
 /**
- * batadv_neigh_node_put - decrement the neighbors refcounter and possibly
+ * batadv_neigh_node_put() - decrement the neighbors refcounter and possibly
  *  release it
  * @neigh_node: neigh neighbor to free
  */
@@ -285,7 +331,7 @@ void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
 }
 
 /**
- * batadv_orig_router_get - router to the originator depending on iface
+ * batadv_orig_router_get() - router to the originator depending on iface
  * @orig_node: the orig node for the router
  * @if_outgoing: the interface where the payload packet has been received or
  *  the OGM should be sent to
@@ -318,7 +364,7 @@ batadv_orig_router_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
+ * batadv_orig_ifinfo_get() - find the ifinfo from an orig_node
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -350,7 +396,7 @@ batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
+ * batadv_orig_ifinfo_new() - search and possibly create an orig_ifinfo object
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -396,7 +442,7 @@ out:
 }
 
 /**
- * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
+ * batadv_neigh_ifinfo_get() - find the ifinfo from a neigh_node
  * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -429,7 +475,7 @@ batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
 }
 
 /**
- * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
+ * batadv_neigh_ifinfo_new() - search and possibly create a neigh_ifinfo object
  * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -472,7 +518,7 @@ out:
 }
 
 /**
- * batadv_neigh_node_get - retrieve a neighbour from the list
+ * batadv_neigh_node_get() - retrieve a neighbour from the list
  * @orig_node: originator which the neighbour belongs to
  * @hard_iface: the interface where this neighbour is connected to
  * @addr: the address of the neighbour
@@ -509,7 +555,7 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_hardif_neigh_create - create a hardif neighbour node
+ * batadv_hardif_neigh_create() - create a hardif neighbour node
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
  * @orig_node: originator object representing the neighbour
@@ -555,7 +601,7 @@ out:
 }
 
 /**
- * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
+ * batadv_hardif_neigh_get_or_create() - retrieve or create a hardif neighbour
  *  node
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
@@ -579,7 +625,7 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
 }
 
 /**
- * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
+ * batadv_hardif_neigh_get() - retrieve a hardif neighbour from the list
  * @hard_iface: the interface where this neighbour is connected to
  * @neigh_addr: the address of the neighbour
  *
@@ -611,7 +657,7 @@ batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
 }
 
 /**
- * batadv_neigh_node_create - create a neigh node object
+ * batadv_neigh_node_create() - create a neigh node object
  * @orig_node: originator object representing the neighbour
  * @hard_iface: the interface where the neighbour is connected to
  * @neigh_addr: the mac address of the neighbour interface
@@ -676,7 +722,7 @@ out:
 }
 
 /**
- * batadv_neigh_node_get_or_create - retrieve or create a neigh node object
+ * batadv_neigh_node_get_or_create() - retrieve or create a neigh node object
  * @orig_node: originator object representing the neighbour
  * @hard_iface: the interface where the neighbour is connected to
  * @neigh_addr: the mac address of the neighbour interface
@@ -700,7 +746,7 @@ batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
+ * batadv_hardif_neigh_seq_print_text() - print the single hop neighbour list
  * @seq: neighbour table seq_file struct
  * @offset: not used
  *
@@ -735,8 +781,8 @@ int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_hardif_neigh_dump - Dump to netlink the neighbor infos for a specific
- *  outgoing interface
+ * batadv_hardif_neigh_dump() - Dump to netlink the neighbor infos for a
+ *  specific outgoing interface
  * @msg: message to dump into
  * @cb: parameters for the dump
  *
@@ -812,7 +858,7 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
 }
 
 /**
- * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
+ * batadv_orig_ifinfo_release() - release orig_ifinfo from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the orig_ifinfo
  */
@@ -835,7 +881,7 @@ static void batadv_orig_ifinfo_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_ifinfo_put - decrement the refcounter and possibly release
+ * batadv_orig_ifinfo_put() - decrement the refcounter and possibly release
  *  the orig_ifinfo
  * @orig_ifinfo: the orig_ifinfo object to release
  */
@@ -845,7 +891,7 @@ void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo)
 }
 
 /**
- * batadv_orig_node_free_rcu - free the orig_node
+ * batadv_orig_node_free_rcu() - free the orig_node
  * @rcu: rcu pointer of the orig_node
  */
 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
@@ -866,7 +912,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_orig_node_release - release orig_node from lists and queue for
+ * batadv_orig_node_release() - release orig_node from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the orig_node
  */
@@ -917,7 +963,7 @@ static void batadv_orig_node_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_node_put - decrement the orig node refcounter and possibly
+ * batadv_orig_node_put() - decrement the orig node refcounter and possibly
  *  release it
  * @orig_node: the orig node to free
  */
@@ -926,6 +972,10 @@ void batadv_orig_node_put(struct batadv_orig_node *orig_node)
        kref_put(&orig_node->refcount, batadv_orig_node_release);
 }
 
+/**
+ * batadv_originator_free() - Free all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_originator_free(struct batadv_priv *bat_priv)
 {
        struct batadv_hashtable *hash = bat_priv->orig_hash;
@@ -959,7 +1009,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_orig_node_new - creates a new orig_node
+ * batadv_orig_node_new() - creates a new orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the originator
  *
@@ -1038,7 +1088,7 @@ free_orig_node:
 }
 
 /**
- * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * batadv_purge_neigh_ifinfo() - purge obsolete ifinfo entries from neighbor
  * @bat_priv: the bat priv with all the soft interface information
  * @neigh: orig node which is to be checked
  */
@@ -1079,7 +1129,7 @@ batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
+ * batadv_purge_orig_ifinfo() - purge obsolete ifinfo entries from originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1131,7 +1181,7 @@ batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_neighbors - purges neighbors from originator
+ * batadv_purge_orig_neighbors() - purges neighbors from originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1189,7 +1239,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_find_best_neighbor - finds the best neighbor after purging
+ * batadv_find_best_neighbor() - finds the best neighbor after purging
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  * @if_outgoing: the interface for which the metric should be compared
@@ -1224,7 +1274,7 @@ batadv_find_best_neighbor(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_node - purges obsolete information from an orig_node
+ * batadv_purge_orig_node() - purges obsolete information from an orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1341,12 +1391,24 @@ static void batadv_purge_orig(struct work_struct *work)
                           msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
 }
 
+/**
+ * batadv_purge_orig_ref() - Purge all outdated originators
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
 {
        _batadv_purge_orig(bat_priv);
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_orig_seq_print_text() - Print the originator table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1376,7 +1438,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 }
 
 /**
- * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
+ * batadv_orig_hardif_seq_print_text() - writes originator infos for a specific
  *  outgoing interface
  * @seq: debugfs table seq_file struct
  * @offset: not used
@@ -1423,7 +1485,7 @@ out:
 #endif
 
 /**
- * batadv_orig_dump - Dump to netlink the originator infos for a specific
+ * batadv_orig_dump() - Dump to netlink the originator infos for a specific
  *  outgoing interface
  * @msg: message to dump into
  * @cb: parameters for the dump
@@ -1499,6 +1561,13 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
        return ret;
 }
 
+/**
+ * batadv_orig_hash_add_if() - Add interface to originators in orig_hash
+ * @hard_iface: hard interface to add (already slave of the soft interface)
+ * @max_if_num: new number of interfaces
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
 {
@@ -1534,6 +1603,13 @@ err:
        return -ENOMEM;
 }
 
+/**
+ * batadv_orig_hash_del_if() - Remove interface from originators in orig_hash
+ * @hard_iface: hard interface to remove (still slave of the soft interface)
+ * @max_if_num: new number of interfaces
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
 {
index d94220a..8e543a3 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
 #include <linux/compiler.h>
 #include <linux/if_ether.h>
 #include <linux/jhash.h>
-#include <linux/kref.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/stddef.h>
 #include <linux/types.h>
 
-#include "hash.h"
-
 struct netlink_callback;
 struct seq_file;
 struct sk_buff;
@@ -89,8 +84,13 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
                          unsigned short vid);
 void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan);
 
-/* hashfunction to choose an entry in a hash table of given size
- * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
+/**
+ * batadv_choose_orig() - Return the index of the orig entry in the hash table
+ * @data: mac address of the originator node
+ * @size: the size of the hash table
+ *
+ * Return: the hash index where the object represented by @data should be
+ * stored at.
  */
 static inline u32 batadv_choose_orig(const void *data, u32 size)
 {
@@ -100,34 +100,7 @@ static inline u32 batadv_choose_orig(const void *data, u32 size)
        return hash % size;
 }
 
-static inline struct batadv_orig_node *
-batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
-{
-       struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_head *head;
-       struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
-       int index;
-
-       if (!hash)
-               return NULL;
-
-       index = batadv_choose_orig(data, hash->size);
-       head = &hash->table[index];
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-               if (!batadv_compare_eth(orig_node, data))
-                       continue;
-
-               if (!kref_get_unless_zero(&orig_node->refcount))
-                       continue;
-
-               orig_node_tmp = orig_node;
-               break;
-       }
-       rcu_read_unlock();
-
-       return orig_node_tmp;
-}
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data);
 
 #endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
index 40d9bf3..b6891e8 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -33,6 +34,7 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/stddef.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bitarray.h"
 #include "bridge_loop_avoidance.h"
@@ -43,7 +45,6 @@
 #include "log.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "soft-interface.h"
 #include "tp_meter.h"
@@ -54,7 +55,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
 
 /**
- * _batadv_update_route - set the router for this originator
+ * _batadv_update_route() - set the router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be configured
  * @recv_if: the receive interface for which this route is set
@@ -118,7 +119,7 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_update_route - set the router for this originator
+ * batadv_update_route() - set the router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be configured
  * @recv_if: the receive interface for which this route is set
@@ -145,7 +146,7 @@ out:
 }
 
 /**
- * batadv_window_protected - checks whether the host restarted and is in the
+ * batadv_window_protected() - checks whether the host restarted and is in the
  *  protection time.
  * @bat_priv: the bat priv with all the soft interface information
  * @seq_num_diff: difference between the current/received sequence number and
@@ -180,6 +181,14 @@ bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
        return false;
 }
 
+/**
+ * batadv_check_management_packet() - Check preconditions for management packets
+ * @skb: incoming packet buffer
+ * @hard_iface: incoming hard interface
+ * @header_len: minimal header length of packet type
+ *
+ * Return: true when management preconditions are met, false otherwise
+ */
 bool batadv_check_management_packet(struct sk_buff *skb,
                                    struct batadv_hard_iface *hard_iface,
                                    int header_len)
@@ -212,7 +221,7 @@ bool batadv_check_management_packet(struct sk_buff *skb,
 }
 
 /**
- * batadv_recv_my_icmp_packet - receive an icmp packet locally
+ * batadv_recv_my_icmp_packet() - receive an icmp packet locally
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: icmp packet to process
  *
@@ -347,6 +356,13 @@ out:
        return ret;
 }
 
+/**
+ * batadv_recv_icmp_packet() - Process incoming icmp packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_icmp_packet(struct sk_buff *skb,
                            struct batadv_hard_iface *recv_if)
 {
@@ -440,7 +456,7 @@ free_skb:
 }
 
 /**
- * batadv_check_unicast_packet - Check for malformed unicast packets
+ * batadv_check_unicast_packet() - Check for malformed unicast packets
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of header to pull
@@ -478,7 +494,7 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_last_bonding_get - Get last_bonding_candidate of orig_node
+ * batadv_last_bonding_get() - Get last_bonding_candidate of orig_node
  * @orig_node: originator node whose last bonding candidate should be retrieved
  *
  * Return: last bonding candidate of router or NULL if not found
@@ -501,7 +517,7 @@ batadv_last_bonding_get(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
+ * batadv_last_bonding_replace() - Replace last_bonding_candidate of orig_node
  * @orig_node: originator node whose bonding candidates should be replaced
  * @new_candidate: new bonding candidate or NULL
  */
@@ -524,7 +540,7 @@ batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_find_router - find a suitable router for this originator
+ * batadv_find_router() - find a suitable router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the destination node
  * @recv_if: pointer to interface this packet was received on
@@ -741,7 +757,7 @@ free_skb:
 }
 
 /**
- * batadv_reroute_unicast_packet - update the unicast header for re-routing
+ * batadv_reroute_unicast_packet() - update the unicast header for re-routing
  * @bat_priv: the bat priv with all the soft interface information
  * @unicast_packet: the unicast header to be updated
  * @dst_addr: the payload destination
@@ -904,7 +920,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_recv_unhandled_unicast_packet - receive and process packets which
+ * batadv_recv_unhandled_unicast_packet() - receive and process packets which
  *     are in the unicast number space but not yet known to the implementation
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
@@ -935,6 +951,13 @@ free_skb:
        return NET_RX_DROP;
 }
 
+/**
+ * batadv_recv_unicast_packet() - Process incoming unicast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_unicast_packet(struct sk_buff *skb,
                               struct batadv_hard_iface *recv_if)
 {
@@ -1036,7 +1059,7 @@ free_skb:
 }
 
 /**
- * batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
+ * batadv_recv_unicast_tvlv() - receive and process unicast tvlv packets
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
  *
@@ -1090,7 +1113,7 @@ free_skb:
 }
 
 /**
- * batadv_recv_frag_packet - process received fragment
+ * batadv_recv_frag_packet() - process received fragment
  * @skb: the received fragment
  * @recv_if: interface that the skb is received on
  *
@@ -1155,6 +1178,13 @@ free_skb:
        return ret;
 }
 
+/**
+ * batadv_recv_bcast_packet() - Process incoming broadcast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_bcast_packet(struct sk_buff *skb,
                             struct batadv_hard_iface *recv_if)
 {
index 5ede16c..a1289bc 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
index 7895323..2a5ab6f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -23,7 +24,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
@@ -54,7 +55,7 @@
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
 
 /**
- * batadv_send_skb_packet - send an already prepared packet
+ * batadv_send_skb_packet() - send an already prepared packet
  * @skb: the packet to send
  * @hard_iface: the interface to use to send the broadcast packet
  * @dst_addr: the payload destination
@@ -123,12 +124,30 @@ send_skb_err:
        return NET_XMIT_DROP;
 }
 
+/**
+ * batadv_send_broadcast_skb() - Send broadcast packet via hard interface
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @hard_iface: outgoing interface
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
 int batadv_send_broadcast_skb(struct sk_buff *skb,
                              struct batadv_hard_iface *hard_iface)
 {
        return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
 }
 
+/**
+ * batadv_send_unicast_skb() - Send unicast packet to neighbor
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @neigh: neighbor which is used as next hop to destination
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
 int batadv_send_unicast_skb(struct sk_buff *skb,
                            struct batadv_neigh_node *neigh)
 {
@@ -153,7 +172,7 @@ int batadv_send_unicast_skb(struct sk_buff *skb,
 }
 
 /**
- * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
+ * batadv_send_skb_to_orig() - Lookup next-hop and transmit skb.
  * @skb: Packet to be transmitted.
  * @orig_node: Final destination of the packet.
  * @recv_if: Interface used when receiving the packet (can be NULL).
@@ -216,7 +235,7 @@ free_skb:
 }
 
 /**
- * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
+ * batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the
  *  common fields for unicast packets
  * @skb: the skb carrying the unicast header to initialize
  * @hdr_size: amount of bytes to push at the beginning of the skb
@@ -249,7 +268,7 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
 }
 
 /**
- * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
+ * batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
@@ -264,7 +283,7 @@ static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
 }
 
 /**
- * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
+ * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a
  *  unicast 4addr header
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the skb containing the payload to encapsulate
@@ -308,7 +327,7 @@ out:
 }
 
 /**
- * batadv_send_skb_unicast - encapsulate and send an skb via unicast
+ * batadv_send_skb_unicast() - encapsulate and send an skb via unicast
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @packet_type: the batman unicast packet type to use
@@ -378,7 +397,7 @@ out:
 }
 
 /**
- * batadv_send_skb_via_tt_generic - send an skb via TT lookup
+ * batadv_send_skb_via_tt_generic() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @packet_type: the batman unicast packet type to use
@@ -425,7 +444,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_skb_via_gw - send an skb via gateway lookup
+ * batadv_send_skb_via_gw() - send an skb via gateway lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @vid: the vid to be used to search the translation table
@@ -452,7 +471,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_forw_packet_free - free a forwarding packet
+ * batadv_forw_packet_free() - free a forwarding packet
  * @forw_packet: The packet to free
  * @dropped: whether the packet is freed because it is dropped
  *
@@ -477,7 +496,7 @@ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_alloc - allocate a forwarding packet
+ * batadv_forw_packet_alloc() - allocate a forwarding packet
  * @if_incoming: The (optional) if_incoming to be grabbed
  * @if_outgoing: The (optional) if_outgoing to be grabbed
  * @queue_left: The (optional) queue counter to decrease
@@ -543,7 +562,7 @@ err:
 }
 
 /**
- * batadv_forw_packet_was_stolen - check whether someone stole this packet
+ * batadv_forw_packet_was_stolen() - check whether someone stole this packet
  * @forw_packet: the forwarding packet to check
  *
  * This function checks whether the given forwarding packet was claimed by
@@ -558,7 +577,7 @@ batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_forw_packet_steal - claim a forw_packet for free()
+ * batadv_forw_packet_steal() - claim a forw_packet for free()
  * @forw_packet: the forwarding packet to steal
  * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
  *
@@ -589,7 +608,7 @@ bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_list_steal - claim a list of forward packets for free()
+ * batadv_forw_packet_list_steal() - claim a list of forward packets for free()
  * @forw_list: the to be stolen forward packets
  * @cleanup_list: a backup pointer, to be able to dispose the packet later
  * @hard_iface: the interface to steal forward packets from
@@ -625,7 +644,7 @@ batadv_forw_packet_list_steal(struct hlist_head *forw_list,
 }
 
 /**
- * batadv_forw_packet_list_free - free a list of forward packets
+ * batadv_forw_packet_list_free() - free a list of forward packets
  * @head: a list of to be freed forw_packets
  *
  * This function cancels the scheduling of any packet in the provided list,
@@ -649,7 +668,7 @@ static void batadv_forw_packet_list_free(struct hlist_head *head)
 }
 
 /**
- * batadv_forw_packet_queue - try to queue a forwarding packet
+ * batadv_forw_packet_queue() - try to queue a forwarding packet
  * @forw_packet: the forwarding packet to queue
  * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
  * @head: the shelve to queue it on (e.g. forw_{bat,bcast}_list)
@@ -693,7 +712,7 @@ static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_bcast_queue - try to queue a broadcast packet
+ * batadv_forw_packet_bcast_queue() - try to queue a broadcast packet
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the forwarding packet to queue
  * @send_time: timestamp (jiffies) when the packet is to be sent
@@ -712,7 +731,7 @@ batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_forw_packet_ogmv1_queue - try to queue an OGMv1 packet
+ * batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the forwarding packet to queue
  * @send_time: timestamp (jiffies) when the packet is to be sent
@@ -730,7 +749,7 @@ void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
+ * batadv_add_bcast_packet_to_list() - queue broadcast packet for multiple sends
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: broadcast packet to add
  * @delay: number of jiffies to wait before sending
@@ -790,7 +809,7 @@ err:
 }
 
 /**
- * batadv_forw_packet_bcasts_left - check if a retransmission is necessary
+ * batadv_forw_packet_bcasts_left() - check if a retransmission is necessary
  * @forw_packet: the forwarding packet to check
  * @hard_iface: the interface to check on
  *
@@ -818,7 +837,8 @@ batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_bcasts_inc - increment retransmission counter of a packet
+ * batadv_forw_packet_bcasts_inc() - increment retransmission counter of a
+ *  packet
  * @forw_packet: the packet to increase the counter for
  */
 static void
@@ -828,7 +848,7 @@ batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_forw_packet_is_rebroadcast - check packet for previous transmissions
+ * batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions
  * @forw_packet: the packet to check
  *
  * Return: True if this packet was transmitted before, false otherwise.
@@ -953,7 +973,7 @@ out:
 }
 
 /**
- * batadv_purge_outstanding_packets - stop/purge scheduled bcast/OGMv1 packets
+ * batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets
  * @bat_priv: the bat priv with all the soft interface information
  * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
  *
index a16b34f..1e8c790 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -23,8 +24,7 @@
 #include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
-
-#include "packet.h"
+#include <uapi/linux/batadv_packet.h>
 
 struct sk_buff;
 
@@ -76,7 +76,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
                           unsigned short vid);
 
 /**
- * batadv_send_skb_via_tt - send an skb via TT lookup
+ * batadv_send_skb_via_tt() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the payload to send
  * @dst_hint: can be used to override the destination contained in the skb
@@ -97,7 +97,7 @@ static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_skb_via_tt_4addr - send an skb via TT lookup
+ * batadv_send_skb_via_tt_4addr() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the payload to send
  * @packet_subtype: the unicast 4addr packet subtype to use
index 9f673cd..900c5ce 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -26,7 +27,7 @@
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/jiffies.h>
@@ -48,6 +49,7 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "bridge_loop_avoidance.h"
 #include "multicast.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "sysfs.h"
 #include "translation-table.h"
 
+/**
+ * batadv_skb_head_push() - Increase header size and move (push) head pointer
+ * @skb: packet buffer which should be modified
+ * @len: number of bytes to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
 {
        int result;
@@ -96,7 +104,7 @@ static int batadv_interface_release(struct net_device *dev)
 }
 
 /**
- * batadv_sum_counter - Sum the cpu-local counters for index 'idx'
+ * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
  * @bat_priv: the bat priv with all the soft interface information
  * @idx: index of counter to sum up
  *
@@ -169,7 +177,7 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 /**
- * batadv_interface_set_rx_mode - set the rx mode of a device
+ * batadv_interface_set_rx_mode() - set the rx mode of a device
  * @dev: registered network device to modify
  *
  * We do not actually need to set any rx filters for the virtual batman
@@ -389,7 +397,7 @@ end:
 }
 
 /**
- * batadv_interface_rx - receive ethernet frame on local batman-adv interface
+ * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
  * @soft_iface: local interface which will receive the ethernet frame
  * @skb: ethernet frame for @soft_iface
  * @hdr_size: size of already parsed batman-adv header
@@ -501,8 +509,8 @@ out:
 }
 
 /**
- * batadv_softif_vlan_release - release vlan from lists and queue for free after
- *  rcu grace period
+ * batadv_softif_vlan_release() - release vlan from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the vlan object
  */
 static void batadv_softif_vlan_release(struct kref *ref)
@@ -519,7 +527,7 @@ static void batadv_softif_vlan_release(struct kref *ref)
 }
 
 /**
- * batadv_softif_vlan_put - decrease the vlan object refcounter and
+ * batadv_softif_vlan_put() - decrease the vlan object refcounter and
  *  possibly release it
  * @vlan: the vlan object to release
  */
@@ -532,7 +540,7 @@ void batadv_softif_vlan_put(struct batadv_softif_vlan *vlan)
 }
 
 /**
- * batadv_softif_vlan_get - get the vlan object for a specific vid
+ * batadv_softif_vlan_get() - get the vlan object for a specific vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the identifier of the vlan object to retrieve
  *
@@ -561,7 +569,7 @@ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_softif_create_vlan - allocate the needed resources for a new vlan
+ * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  *
@@ -613,7 +621,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 }
 
 /**
- * batadv_softif_destroy_vlan - remove and destroy a softif_vlan object
+ * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
  * @bat_priv: the bat priv with all the soft interface information
  * @vlan: the object to remove
  */
@@ -631,7 +639,7 @@ static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_interface_add_vid - ndo_add_vid API implementation
+ * batadv_interface_add_vid() - ndo_add_vid API implementation
  * @dev: the netdev of the mesh interface
  * @proto: protocol of the vlan id
  * @vid: identifier of the new vlan
@@ -689,7 +697,7 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 }
 
 /**
- * batadv_interface_kill_vid - ndo_kill_vid API implementation
+ * batadv_interface_kill_vid() - ndo_kill_vid API implementation
  * @dev: the netdev of the mesh interface
  * @proto: protocol of the vlan id
  * @vid: identifier of the deleted vlan
@@ -732,7 +740,7 @@ static struct lock_class_key batadv_netdev_xmit_lock_key;
 static struct lock_class_key batadv_netdev_addr_lock_key;
 
 /**
- * batadv_set_lockdep_class_one - Set lockdep class for a single tx queue
+ * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
  * @dev: device which owns the tx queue
  * @txq: tx queue to modify
  * @_unused: always NULL
@@ -745,7 +753,7 @@ static void batadv_set_lockdep_class_one(struct net_device *dev,
 }
 
 /**
- * batadv_set_lockdep_class - Set txq and addr_list lockdep class
+ * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
  * @dev: network device to modify
  */
 static void batadv_set_lockdep_class(struct net_device *dev)
@@ -755,7 +763,7 @@ static void batadv_set_lockdep_class(struct net_device *dev)
 }
 
 /**
- * batadv_softif_init_late - late stage initialization of soft interface
+ * batadv_softif_init_late() - late stage initialization of soft interface
  * @dev: registered network device to modify
  *
  * Return: error code on failures
@@ -860,7 +868,7 @@ free_bat_counters:
 }
 
 /**
- * batadv_softif_slave_add - Add a slave interface to a batadv_soft_interface
+ * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should become the slave interface
  * @extack: extended ACK report struct
@@ -888,7 +896,7 @@ out:
 }
 
 /**
- * batadv_softif_slave_del - Delete a slave iface from a batadv_soft_interface
+ * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should be removed from the master interface
  *
@@ -1023,7 +1031,7 @@ static const struct ethtool_ops batadv_ethtool_ops = {
 };
 
 /**
- * batadv_softif_free - Deconstructor of batadv_soft_interface
+ * batadv_softif_free() - Deconstructor of batadv_soft_interface
  * @dev: Device to cleanup and remove
  */
 static void batadv_softif_free(struct net_device *dev)
@@ -1039,7 +1047,7 @@ static void batadv_softif_free(struct net_device *dev)
 }
 
 /**
- * batadv_softif_init_early - early stage initialization of soft interface
+ * batadv_softif_init_early() - early stage initialization of soft interface
  * @dev: registered network device to modify
  */
 static void batadv_softif_init_early(struct net_device *dev)
@@ -1063,6 +1071,13 @@ static void batadv_softif_init_early(struct net_device *dev)
        dev->ethtool_ops = &batadv_ethtool_ops;
 }
 
+/**
+ * batadv_softif_create() - Create and register soft interface
+ * @net: the applicable net namespace
+ * @name: name of the new soft interface
+ *
+ * Return: newly allocated soft_interface, NULL on errors
+ */
 struct net_device *batadv_softif_create(struct net *net, const char *name)
 {
        struct net_device *soft_iface;
@@ -1089,7 +1104,7 @@ struct net_device *batadv_softif_create(struct net *net, const char *name)
 }
 
 /**
- * batadv_softif_destroy_sysfs - deletion of batadv_soft_interface via sysfs
+ * batadv_softif_destroy_sysfs() - deletion of batadv_soft_interface via sysfs
  * @soft_iface: the to-be-removed batman-adv interface
  */
 void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
@@ -1111,7 +1126,8 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
 }
 
 /**
- * batadv_softif_destroy_netlink - deletion of batadv_soft_interface via netlink
+ * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
+ *  netlink
  * @soft_iface: the to-be-removed batman-adv interface
  * @head: list pointer
  */
@@ -1139,6 +1155,12 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
        unregister_netdevice_queue(soft_iface, head);
 }
 
+/**
+ * batadv_softif_is_valid() - Check whether device is a batadv soft interface
+ * @net_dev: device which should be checked
+ *
+ * Return: true when net_dev is a batman-adv interface, false otherwise
+ */
 bool batadv_softif_is_valid(const struct net_device *net_dev)
 {
        if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
index 639c3ab..075c5b5 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
index aa187fd..c1578fa 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
 #include <linux/compiler.h>
 #include <linux/device.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <linux/kernel.h>
+#include <linux/kobject.h>
 #include <linux/kref.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
@@ -37,6 +39,7 @@
 #include <linux/string.h>
 #include <linux/stringify.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
@@ -45,7 +48,6 @@
 #include "hard-interface.h"
 #include "log.h"
 #include "network-coding.h"
-#include "packet.h"
 #include "soft-interface.h"
 
 static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
@@ -63,7 +65,7 @@ static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
 }
 
 /**
- * batadv_vlan_kobj_to_batpriv - convert a vlan kobj in the associated batpriv
+ * batadv_vlan_kobj_to_batpriv() - convert a vlan kobj in the associated batpriv
  * @obj: kobject to convert
  *
  * Return: the associated batadv_priv struct.
@@ -83,7 +85,7 @@ static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
 }
 
 /**
- * batadv_kobj_to_vlan - convert a kobj in the associated softif_vlan struct
+ * batadv_kobj_to_vlan() - convert a kobj in the associated softif_vlan struct
  * @bat_priv: the bat priv with all the soft interface information
  * @obj: kobject to convert
  *
@@ -598,7 +600,7 @@ static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
 }
 
 /**
- * batadv_show_isolation_mark - print the current isolation mark/mask
+ * batadv_show_isolation_mark() - print the current isolation mark/mask
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
  * @buff: the buffer that will contain the data to send back to the user
@@ -616,8 +618,8 @@ static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
 }
 
 /**
- * batadv_store_isolation_mark - parse and store the isolation mark/mask entered
- *  by the user
+ * batadv_store_isolation_mark() - parse and store the isolation mark/mask
+ *  entered by the user
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
  * @buff: the buffer containing the user data
@@ -733,6 +735,12 @@ static struct batadv_attribute *batadv_vlan_attrs[] = {
        NULL,
 };
 
+/**
+ * batadv_sysfs_add_meshif() - Add soft interface specific sysfs entries
+ * @dev: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_sysfs_add_meshif(struct net_device *dev)
 {
        struct kobject *batif_kobject = &dev->dev.kobj;
@@ -773,6 +781,10 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_sysfs_del_meshif() - Remove soft interface specific sysfs entries
+ * @dev: netdev struct of the soft interface
+ */
 void batadv_sysfs_del_meshif(struct net_device *dev)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
@@ -788,7 +800,7 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
 }
 
 /**
- * batadv_sysfs_add_vlan - add all the needed sysfs objects for the new vlan
+ * batadv_sysfs_add_vlan() - add all the needed sysfs objects for the new vlan
  * @dev: netdev of the mesh interface
  * @vlan: private data of the newly added VLAN interface
  *
@@ -849,7 +861,7 @@ out:
 }
 
 /**
- * batadv_sysfs_del_vlan - remove all the sysfs objects for a given VLAN
+ * batadv_sysfs_del_vlan() - remove all the sysfs objects for a given VLAN
  * @bat_priv: the bat priv with all the soft interface information
  * @vlan: the private data of the VLAN to destroy
  */
@@ -894,7 +906,7 @@ static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
 }
 
 /**
- * batadv_store_mesh_iface_finish - store new hardif mesh_iface state
+ * batadv_store_mesh_iface_finish() - store new hardif mesh_iface state
  * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
  * @ifname: name of soft-interface to modify
  *
@@ -947,7 +959,7 @@ out:
 }
 
 /**
- * batadv_store_mesh_iface_work - store new hardif mesh_iface state
+ * batadv_store_mesh_iface_work() - store new hardif mesh_iface state
  * @work: work queue item
  *
  * Changes the parts of the hard+soft interface which can not be modified under
@@ -1043,7 +1055,7 @@ static ssize_t batadv_show_iface_status(struct kobject *kobj,
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
 
 /**
- * batadv_store_throughput_override - parse and store throughput override
+ * batadv_store_throughput_override() - parse and store throughput override
  *  entered by the user
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
@@ -1130,6 +1142,13 @@ static struct batadv_attribute *batadv_batman_attrs[] = {
        NULL,
 };
 
+/**
+ * batadv_sysfs_add_hardif() - Add hard interface specific sysfs entries
+ * @hardif_obj: address where to store the pointer to new sysfs folder
+ * @dev: netdev struct of the hard interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
 {
        struct kobject *hardif_kobject = &dev->dev.kobj;
@@ -1164,6 +1183,11 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_sysfs_del_hardif() - Remove hard interface specific sysfs entries
+ * @hardif_obj: address to the pointer to which stores batman-adv sysfs folder
+ *  of the hard interface
+ */
 void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
 {
        kobject_uevent(*hardif_obj, KOBJ_REMOVE);
@@ -1172,6 +1196,16 @@ void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
        *hardif_obj = NULL;
 }
 
+/**
+ * batadv_throw_uevent() - Send an uevent with batman-adv specific env data
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: subsystem type of event. Stored in uevent's BATTYPE
+ * @action: action type of event. Stored in uevent's BATACTION
+ * @data: string with additional information to the event (ignored for
+ *  BATADV_UEV_DEL). Stored in uevent's BATDATA
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
                        enum batadv_uev_action action, const char *data)
 {
index e487412..bbeee61 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -35,10 +36,23 @@ struct net_device;
  */
 #define BATADV_SYSFS_VLAN_SUBDIR_PREFIX "vlan"
 
+/**
+ * struct batadv_attribute - sysfs export helper for batman-adv attributes
+ */
 struct batadv_attribute {
+       /** @attr: sysfs attribute file */
        struct attribute attr;
+
+       /**
+        * @show: function to export the current attribute's content to sysfs
+        */
        ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
                        char *buf);
+
+       /**
+        * @store: function to load new value from character buffer and save it
+        * in batman-adv attribute
+        */
        ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
                         char *buf, size_t count);
 };
index ebc4e22..8b57671 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Edo Monticelli, Antonio Quartulli
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/cache.h>
 #include <linux/compiler.h>
 #include <linux/err.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/timer.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "hard-interface.h"
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 
 /**
@@ -97,7 +98,7 @@
 static u8 batadv_tp_prerandom[4096] __read_mostly;
 
 /**
- * batadv_tp_session_cookie - generate session cookie based on session ids
+ * batadv_tp_session_cookie() - generate session cookie based on session ids
  * @session: TP session identifier
  * @icmp_uid: icmp pseudo uid of the tp session
  *
@@ -115,7 +116,7 @@ static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid)
 }
 
 /**
- * batadv_tp_cwnd - compute the new cwnd size
+ * batadv_tp_cwnd() - compute the new cwnd size
  * @base: base cwnd size value
  * @increment: the value to add to base to get the new size
  * @min: minimum cwnd value (usually MSS)
@@ -140,7 +141,7 @@ static u32 batadv_tp_cwnd(u32 base, u32 increment, u32 min)
 }
 
 /**
- * batadv_tp_updated_cwnd - update the Congestion Windows
+ * batadv_tp_updated_cwnd() - update the Congestion Windows
  * @tp_vars: the private data of the current TP meter session
  * @mss: maximum segment size of transmission
  *
@@ -176,7 +177,7 @@ static void batadv_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss)
 }
 
 /**
- * batadv_tp_update_rto - calculate new retransmission timeout
+ * batadv_tp_update_rto() - calculate new retransmission timeout
  * @tp_vars: the private data of the current TP meter session
  * @new_rtt: new roundtrip time in msec
  */
@@ -212,7 +213,7 @@ static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_batctl_notify - send client status result to client
+ * batadv_tp_batctl_notify() - send client status result to client
  * @reason: reason for tp meter session stop
  * @dst: destination of tp_meter session
  * @bat_priv: the bat priv with all the soft interface information
@@ -244,7 +245,7 @@ static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason,
 }
 
 /**
- * batadv_tp_batctl_error_notify - send client error result to client
+ * batadv_tp_batctl_error_notify() - send client error result to client
  * @reason: reason for tp meter session stop
  * @dst: destination of tp_meter session
  * @bat_priv: the bat priv with all the soft interface information
@@ -259,7 +260,7 @@ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason,
 }
 
 /**
- * batadv_tp_list_find - find a tp_vars object in the global list
+ * batadv_tp_list_find() - find a tp_vars object in the global list
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the other endpoint MAC address to look for
  *
@@ -294,7 +295,8 @@ static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_list_find_session - find tp_vars session object in the global list
+ * batadv_tp_list_find_session() - find tp_vars session object in the global
+ *  list
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the other endpoint MAC address to look for
  * @session: session identifier
@@ -335,7 +337,7 @@ batadv_tp_list_find_session(struct batadv_priv *bat_priv, const u8 *dst,
 }
 
 /**
- * batadv_tp_vars_release - release batadv_tp_vars from lists and queue for
+ * batadv_tp_vars_release() - release batadv_tp_vars from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the batadv_tp_vars
  */
@@ -360,7 +362,7 @@ static void batadv_tp_vars_release(struct kref *ref)
 }
 
 /**
- * batadv_tp_vars_put - decrement the batadv_tp_vars refcounter and possibly
+ * batadv_tp_vars_put() - decrement the batadv_tp_vars refcounter and possibly
  *  release it
  * @tp_vars: the private data of the current TP meter session to be free'd
  */
@@ -370,7 +372,7 @@ static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_sender_cleanup - cleanup sender data and drop and timer
+ * batadv_tp_sender_cleanup() - cleanup sender data and drop the timer
  * @bat_priv: the bat priv with all the soft interface information
  * @tp_vars: the private data of the current TP meter session to cleanup
  */
@@ -400,7 +402,7 @@ static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_sender_end - print info about ended session and inform client
+ * batadv_tp_sender_end() - print info about ended session and inform client
  * @bat_priv: the bat priv with all the soft interface information
  * @tp_vars: the private data of the current TP meter session
  */
@@ -433,7 +435,7 @@ static void batadv_tp_sender_end(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_sender_shutdown - let sender thread/timer stop gracefully
+ * batadv_tp_sender_shutdown() - let sender thread/timer stop gracefully
  * @tp_vars: the private data of the current TP meter session
  * @reason: reason for tp meter session stop
  */
@@ -447,7 +449,7 @@ static void batadv_tp_sender_shutdown(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_sender_finish - stop sender session after test_length was reached
+ * batadv_tp_sender_finish() - stop sender session after test_length was reached
  * @work: delayed work reference of the related tp_vars
  */
 static void batadv_tp_sender_finish(struct work_struct *work)
@@ -463,7 +465,7 @@ static void batadv_tp_sender_finish(struct work_struct *work)
 }
 
 /**
- * batadv_tp_reset_sender_timer - reschedule the sender timer
+ * batadv_tp_reset_sender_timer() - reschedule the sender timer
  * @tp_vars: the private TP meter data for this session
  *
  * Reschedule the timer using tp_vars->rto as delay
@@ -481,7 +483,7 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_sender_timeout - timer that fires in case of packet loss
+ * batadv_tp_sender_timeout() - timer that fires in case of packet loss
  * @t: address to timer_list inside tp_vars
  *
  * If fired it means that there was packet loss.
@@ -531,7 +533,7 @@ static void batadv_tp_sender_timeout(struct timer_list *t)
 }
 
 /**
- * batadv_tp_fill_prerandom - Fill buffer with prefetched random bytes
+ * batadv_tp_fill_prerandom() - Fill buffer with prefetched random bytes
  * @tp_vars: the private TP meter data for this session
  * @buf: Buffer to fill with bytes
  * @nbytes: amount of pseudorandom bytes
@@ -563,7 +565,7 @@ static void batadv_tp_fill_prerandom(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_send_msg - send a single message
+ * batadv_tp_send_msg() - send a single message
  * @tp_vars: the private TP meter data for this session
  * @src: source mac address
  * @orig_node: the originator of the destination
@@ -623,7 +625,7 @@ static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src,
 }
 
 /**
- * batadv_tp_recv_ack - ACK receiving function
+ * batadv_tp_recv_ack() - ACK receiving function
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  *
@@ -765,7 +767,7 @@ out:
 }
 
 /**
- * batadv_tp_avail - check if congestion window is not full
+ * batadv_tp_avail() - check if congestion window is not full
  * @tp_vars: the private data of the current TP meter session
  * @payload_len: size of the payload of a single message
  *
@@ -783,7 +785,7 @@ static bool batadv_tp_avail(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_wait_available - wait until congestion window becomes free or
+ * batadv_tp_wait_available() - wait until congestion window becomes free or
  *  timeout is reached
  * @tp_vars: the private data of the current TP meter session
  * @plen: size of the payload of a single message
@@ -805,7 +807,7 @@ static int batadv_tp_wait_available(struct batadv_tp_vars *tp_vars, size_t plen)
 }
 
 /**
- * batadv_tp_send - main sending thread of a tp meter session
+ * batadv_tp_send() - main sending thread of a tp meter session
  * @arg: address of the related tp_vars
  *
  * Return: nothing, this function never returns
@@ -904,7 +906,8 @@ out:
 }
 
 /**
- * batadv_tp_start_kthread - start new thread which manages the tp meter sender
+ * batadv_tp_start_kthread() - start new thread which manages the tp meter
+ *  sender
  * @tp_vars: the private data of the current TP meter session
  */
 static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
@@ -935,7 +938,7 @@ static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_start - start a new tp meter session
+ * batadv_tp_start() - start a new tp meter session
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the receiver MAC address
  * @test_length: test length in milliseconds
@@ -1060,7 +1063,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
 }
 
 /**
- * batadv_tp_stop - stop currently running tp meter session
+ * batadv_tp_stop() - stop currently running tp meter session
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the receiver MAC address
  * @return_value: reason for tp meter session stop
@@ -1092,7 +1095,7 @@ out:
 }
 
 /**
- * batadv_tp_reset_receiver_timer - reset the receiver shutdown timer
+ * batadv_tp_reset_receiver_timer() - reset the receiver shutdown timer
  * @tp_vars: the private data of the current TP meter session
  *
  * start the receiver shutdown timer or reset it if already started
@@ -1104,7 +1107,7 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is
+ * batadv_tp_receiver_shutdown() - stop a tp meter receiver when timeout is
  *  reached without received ack
  * @t: address to timer_list inside tp_vars
  */
@@ -1149,7 +1152,7 @@ static void batadv_tp_receiver_shutdown(struct timer_list *t)
 }
 
 /**
- * batadv_tp_send_ack - send an ACK packet
+ * batadv_tp_send_ack() - send an ACK packet
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the mac address of the destination originator
  * @seq: the sequence number to ACK
@@ -1221,7 +1224,7 @@ out:
 }
 
 /**
- * batadv_tp_handle_out_of_order - store an out of order packet
+ * batadv_tp_handle_out_of_order() - store an out of order packet
  * @tp_vars: the private data of the current TP meter session
  * @skb: the buffer containing the received packet
  *
@@ -1297,7 +1300,7 @@ out:
 }
 
 /**
- * batadv_tp_ack_unordered - update number received bytes in current stream
+ * batadv_tp_ack_unordered() - update number of received bytes in current stream
  *  without gaps
  * @tp_vars: the private data of the current TP meter session
  */
@@ -1330,7 +1333,7 @@ static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_init_recv - return matching or create new receiver tp_vars
+ * batadv_tp_init_recv() - return matching or create new receiver tp_vars
  * @bat_priv: the bat priv with all the soft interface information
  * @icmp: received icmp tp msg
  *
@@ -1383,7 +1386,7 @@ out_unlock:
 }
 
 /**
- * batadv_tp_recv_msg - process a single data message
+ * batadv_tp_recv_msg() - process a single data message
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  *
@@ -1468,7 +1471,7 @@ out:
 }
 
 /**
- * batadv_tp_meter_recv - main TP Meter receiving function
+ * batadv_tp_meter_recv() - main TP Meter receiving function
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  */
@@ -1494,7 +1497,7 @@ void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
 }
 
 /**
- * batadv_tp_meter_init - initialize global tp_meter structures
+ * batadv_tp_meter_init() - initialize global tp_meter structures
  */
 void __init batadv_tp_meter_init(void)
 {
index a8ada5c..c8b8f2c 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Edo Monticelli, Antonio Quartulli
index 8a3ce79..7550a9c 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
 
 #include <linux/atomic.h>
 #include <linux/bitops.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/cache.h>
 #include <linux/compiler.h>
 #include <linux/crc32c.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jhash.h>
@@ -36,6 +37,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
@@ -50,6 +52,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bridge_loop_avoidance.h"
@@ -58,7 +61,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "tvlv.h"
 
@@ -86,7 +88,7 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                                 bool roaming);
 
 /**
- * batadv_compare_tt - check if two TT entries are the same
+ * batadv_compare_tt() - check if two TT entries are the same
  * @node: the list element pointer of the first TT entry
  * @data2: pointer to the tt_common_entry of the second TT entry
  *
@@ -105,7 +107,7 @@ static bool batadv_compare_tt(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_choose_tt - return the index of the tt entry in the hash table
+ * batadv_choose_tt() - return the index of the tt entry in the hash table
  * @data: pointer to the tt_common_entry object to map
  * @size: the size of the hash table
  *
@@ -125,7 +127,7 @@ static inline u32 batadv_choose_tt(const void *data, u32 size)
 }
 
 /**
- * batadv_tt_hash_find - look for a client in the given hash table
+ * batadv_tt_hash_find() - look for a client in the given hash table
  * @hash: the hash table to search
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -170,7 +172,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
 }
 
 /**
- * batadv_tt_local_hash_find - search the local table for a given client
+ * batadv_tt_local_hash_find() - search the local table for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -195,7 +197,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 }
 
 /**
- * batadv_tt_global_hash_find - search the global table for a given client
+ * batadv_tt_global_hash_find() - search the global table for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -220,7 +222,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 }
 
 /**
- * batadv_tt_local_entry_free_rcu - free the tt_local_entry
+ * batadv_tt_local_entry_free_rcu() - free the tt_local_entry
  * @rcu: rcu pointer of the tt_local_entry
  */
 static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
@@ -234,7 +236,7 @@ static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_local_entry_release - release tt_local_entry from lists and queue
+ * batadv_tt_local_entry_release() - release tt_local_entry from lists and queue
  *  for free after rcu grace period
  * @ref: kref pointer of the nc_node
  */
@@ -251,7 +253,7 @@ static void batadv_tt_local_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_local_entry_put - decrement the tt_local_entry refcounter and
+ * batadv_tt_local_entry_put() - decrement the tt_local_entry refcounter and
  *  possibly release it
  * @tt_local_entry: tt_local_entry to be free'd
  */
@@ -263,7 +265,7 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
 }
 
 /**
- * batadv_tt_global_entry_free_rcu - free the tt_global_entry
+ * batadv_tt_global_entry_free_rcu() - free the tt_global_entry
  * @rcu: rcu pointer of the tt_global_entry
  */
 static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
@@ -277,8 +279,8 @@ static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_global_entry_release - release tt_global_entry from lists and queue
- *  for free after rcu grace period
+ * batadv_tt_global_entry_release() - release tt_global_entry from lists and
+ *  queue for free after rcu grace period
  * @ref: kref pointer of the nc_node
  */
 static void batadv_tt_global_entry_release(struct kref *ref)
@@ -294,7 +296,7 @@ static void batadv_tt_global_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_global_entry_put - decrement the tt_global_entry refcounter and
+ * batadv_tt_global_entry_put() - decrement the tt_global_entry refcounter and
  *  possibly release it
  * @tt_global_entry: tt_global_entry to be free'd
  */
@@ -306,7 +308,7 @@ batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry)
 }
 
 /**
- * batadv_tt_global_hash_count - count the number of orig entries
+ * batadv_tt_global_hash_count() - count the number of orig entries
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to count entries for
  * @vid: VLAN identifier
@@ -331,8 +333,8 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_mod - change the size by v of the local table identified
- *  by vid
+ * batadv_tt_local_size_mod() - change the size by v of the local table
+ *  identified by vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier of the sub-table to change
  * @v: the amount to sum to the local table size
@@ -352,8 +354,8 @@ static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_inc - increase by one the local table size for the given
- *  vid
+ * batadv_tt_local_size_inc() - increase by one the local table size for the
+ *  given vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  */
@@ -364,8 +366,8 @@ static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_dec - decrease by one the local table size for the given
- *  vid
+ * batadv_tt_local_size_dec() - decrease by one the local table size for the
+ *  given vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  */
@@ -376,7 +378,7 @@ static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_size_mod - change the size by v of the global table
+ * batadv_tt_global_size_mod() - change the size by v of the global table
  *  for orig_node identified by vid
  * @orig_node: the originator for which the table has to be modified
  * @vid: the VLAN identifier
@@ -404,7 +406,7 @@ static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_global_size_inc - increase by one the global table size for the
+ * batadv_tt_global_size_inc() - increase by one the global table size for the
  *  given vid
  * @orig_node: the originator which global table size has to be decreased
  * @vid: the vlan identifier
@@ -416,7 +418,7 @@ static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_global_size_dec - decrease by one the global table size for the
+ * batadv_tt_global_size_dec() - decrease by one the global table size for the
  *  given vid
  * @orig_node: the originator which global table size has to be decreased
  * @vid: the vlan identifier
@@ -428,7 +430,7 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_orig_list_entry_free_rcu - free the orig_entry
+ * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry
  * @rcu: rcu pointer of the orig_entry
  */
 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
@@ -441,7 +443,7 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
+ * batadv_tt_orig_list_entry_release() - release tt orig entry from lists and
  *  queue for free after rcu grace period
  * @ref: kref pointer of the tt orig entry
  */
@@ -457,7 +459,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_orig_list_entry_put - decrement the tt orig entry refcounter and
+ * batadv_tt_orig_list_entry_put() - decrement the tt orig entry refcounter and
  *  possibly release it
  * @orig_entry: tt orig entry to be free'd
  */
@@ -468,7 +470,7 @@ batadv_tt_orig_list_entry_put(struct batadv_tt_orig_list_entry *orig_entry)
 }
 
 /**
- * batadv_tt_local_event - store a local TT event (ADD/DEL)
+ * batadv_tt_local_event() - store a local TT event (ADD/DEL)
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_local_entry: the TT entry involved in the event
  * @event_flags: flags to store in the event structure
@@ -543,7 +545,7 @@ unlock:
 }
 
 /**
- * batadv_tt_len - compute length in bytes of given number of tt changes
+ * batadv_tt_len() - compute length in bytes of given number of tt changes
  * @changes_num: number of tt changes
  *
  * Return: computed length in bytes.
@@ -554,7 +556,7 @@ static int batadv_tt_len(int changes_num)
 }
 
 /**
- * batadv_tt_entries - compute the number of entries fitting in tt_len bytes
+ * batadv_tt_entries() - compute the number of entries fitting in tt_len bytes
  * @tt_len: available space
  *
  * Return: the number of entries.
@@ -565,8 +567,8 @@ static u16 batadv_tt_entries(u16 tt_len)
 }
 
 /**
- * batadv_tt_local_table_transmit_size - calculates the local translation table
- *  size when transmitted over the air
+ * batadv_tt_local_table_transmit_size() - calculates the local translation
+ *  table size when transmitted over the air
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: local translation table size in bytes.
@@ -625,7 +627,7 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_add - add a new client to the local table or update an
+ * batadv_tt_local_add() - add a new client to the local table or update an
  *  existing client
  * @soft_iface: netdev struct of the mesh interface
  * @addr: the mac address of the client to add
@@ -830,7 +832,7 @@ out:
 }
 
 /**
- * batadv_tt_prepare_tvlv_global_data - prepare the TVLV TT header to send
+ * batadv_tt_prepare_tvlv_global_data() - prepare the TVLV TT header to send
  *  within a TT Response directed to another node
  * @orig_node: originator for which the TT data has to be prepared
  * @tt_data: uninitialised pointer to the address of the TVLV buffer
@@ -903,8 +905,8 @@ out:
 }
 
 /**
- * batadv_tt_prepare_tvlv_local_data - allocate and prepare the TT TVLV for this
- *  node
+ * batadv_tt_prepare_tvlv_local_data() - allocate and prepare the TT TVLV for
+ *  this node
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: uninitialised pointer to the address of the TVLV buffer
  * @tt_change: uninitialised pointer to the address of the area where the TT
@@ -977,8 +979,8 @@ out:
 }
 
 /**
- * batadv_tt_tvlv_container_update - update the translation table tvlv container
- *  after local tt changes have been committed
+ * batadv_tt_tvlv_container_update() - update the translation table tvlv
+ *  container after local tt changes have been committed
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
@@ -1053,6 +1055,14 @@ container_register:
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_tt_local_seq_print_text() - Print the local tt table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1123,7 +1133,7 @@ out:
 #endif
 
 /**
- * batadv_tt_local_dump_entry - Dump one TT local entry into a message
+ * batadv_tt_local_dump_entry() - Dump one TT local entry into a message
  * @msg :Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1179,7 +1189,7 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_local_dump_bucket - Dump one TT local bucket into a message
+ * batadv_tt_local_dump_bucket() - Dump one TT local bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1216,7 +1226,7 @@ batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_local_dump - Dump TT local entries into a message
+ * batadv_tt_local_dump() - Dump TT local entries into a message
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -1300,7 +1310,7 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_remove - logically remove an entry from the local table
+ * batadv_tt_local_remove() - logically remove an entry from the local table
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the MAC address of the client to remove
  * @vid: VLAN identifier
@@ -1362,7 +1372,7 @@ out:
 }
 
 /**
- * batadv_tt_local_purge_list - purge inactive tt local entries
+ * batadv_tt_local_purge_list() - purge inactive tt local entries
  * @bat_priv: the bat priv with all the soft interface information
  * @head: pointer to the list containing the local tt entries
  * @timeout: parameter deciding whether a given tt local entry is considered
@@ -1397,7 +1407,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_purge - purge inactive tt local entries
+ * batadv_tt_local_purge() - purge inactive tt local entries
  * @bat_priv: the bat priv with all the soft interface information
  * @timeout: parameter deciding whether a given tt local entry is considered
  *  inactive or not
@@ -1490,7 +1500,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_orig_entry_find - find a TT orig_list_entry
+ * batadv_tt_global_orig_entry_find() - find a TT orig_list_entry
  * @entry: the TT global entry where the orig_list_entry has to be
  *  extracted from
  * @orig_node: the originator for which the orig_list_entry has to be found
@@ -1524,8 +1534,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
 }
 
 /**
- * batadv_tt_global_entry_has_orig - check if a TT global entry is also handled
- *  by a given originator
+ * batadv_tt_global_entry_has_orig() - check if a TT global entry is also
+ *  handled by a given originator
  * @entry: the TT global entry to check
  * @orig_node: the originator to search in the list
  *
@@ -1550,7 +1560,7 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
 }
 
 /**
- * batadv_tt_global_sync_flags - update TT sync flags
+ * batadv_tt_global_sync_flags() - update TT sync flags
  * @tt_global: the TT global entry to update sync flags in
  *
  * Updates the sync flag bits in the tt_global flag attribute with a logical
@@ -1574,7 +1584,7 @@ batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
 }
 
 /**
- * batadv_tt_global_orig_entry_add - add or update a TT orig entry
+ * batadv_tt_global_orig_entry_add() - add or update a TT orig entry
  * @tt_global: the TT global entry to add an orig entry in
  * @orig_node: the originator to add an orig entry for
  * @ttvn: translation table version number of this changeset
@@ -1624,7 +1634,7 @@ out:
 }
 
 /**
- * batadv_tt_global_add - add a new TT global entry or update an existing one
+ * batadv_tt_global_add() - add a new TT global entry or update an existing one
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the originator announcing the client
  * @tt_addr: the mac address of the non-mesh client
@@ -1796,7 +1806,7 @@ out:
 }
 
 /**
- * batadv_transtable_best_orig - Get best originator list entry from tt entry
+ * batadv_transtable_best_orig() - Get best originator list entry from tt entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be analyzed
  *
@@ -1842,8 +1852,8 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_tt_global_print_entry - print all orig nodes who announce the address
- *  for this global entry
+ * batadv_tt_global_print_entry() - print all orig nodes who announce the
+ *  address for this global entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be printed
  * @seq: debugfs table seq_file struct
@@ -1925,6 +1935,13 @@ print_list:
        }
 }
 
+/**
+ * batadv_tt_global_seq_print_text() - Print the global tt table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1967,7 +1984,7 @@ out:
 #endif
 
 /**
- * batadv_tt_global_dump_subentry - Dump all TT local entries into a message
+ * batadv_tt_global_dump_subentry() - Dump one TT global subentry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2028,7 +2045,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump_entry - Dump one TT global entry into a message
+ * batadv_tt_global_dump_entry() - Dump one TT global entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2073,7 +2090,7 @@ batadv_tt_global_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump_bucket - Dump one TT local bucket into a message
+ * batadv_tt_global_dump_bucket() - Dump one TT global bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2112,7 +2129,7 @@ batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump -  Dump TT global entries into a message
+ * batadv_tt_global_dump() - Dump TT global entries into a message
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -2180,7 +2197,7 @@ int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb)
 }
 
 /**
- * _batadv_tt_global_del_orig_entry - remove and free an orig_entry
+ * _batadv_tt_global_del_orig_entry() - remove and free an orig_entry
  * @tt_global_entry: the global entry to remove the orig_entry from
  * @orig_entry: the orig entry to remove and free
  *
@@ -2222,7 +2239,7 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
 }
 
 /**
- * batadv_tt_global_del_orig_node - remove orig_node from a global tt entry
+ * batadv_tt_global_del_orig_node() - remove orig_node from a global tt entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: the global entry to remove the orig_node from
  * @orig_node: the originator announcing the client
@@ -2301,7 +2318,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_del - remove a client from the global table
+ * batadv_tt_global_del() - remove a client from the global table
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: an originator serving this client
  * @addr: the mac address of the client
@@ -2367,8 +2384,8 @@ out:
 }
 
 /**
- * batadv_tt_global_del_orig - remove all the TT global entries belonging to the
- *  given originator matching the provided vid
+ * batadv_tt_global_del_orig() - remove all the TT global entries belonging to
+ *  the given originator matching the provided vid
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the originator owning the entries to remove
  * @match_vid: the VLAN identifier to match. If negative all the entries will be
@@ -2539,7 +2556,7 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
 }
 
 /**
- * batadv_transtable_search - get the mesh destination for a given client
+ * batadv_transtable_search() - get the mesh destination for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of the source client
  * @addr: mac address of the destination client
@@ -2599,7 +2616,7 @@ out:
 }
 
 /**
- * batadv_tt_global_crc - calculates the checksum of the local table belonging
+ * batadv_tt_global_crc() - calculates the checksum of the local table belonging
  *  to the given orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator for which the CRC should be computed
@@ -2694,7 +2711,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_crc - calculates the checksum of the local table
+ * batadv_tt_local_crc() - calculates the checksum of the local table
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: VLAN identifier for which the CRC32 has to be computed
  *
@@ -2751,7 +2768,7 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_req_node_release - free tt_req node entry
+ * batadv_tt_req_node_release() - free tt_req node entry
  * @ref: kref pointer of the tt req_node entry
  */
 static void batadv_tt_req_node_release(struct kref *ref)
@@ -2764,7 +2781,7 @@ static void batadv_tt_req_node_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ * batadv_tt_req_node_put() - decrement the tt_req_node refcounter and
  *  possibly release it
  * @tt_req_node: tt_req_node to be free'd
  */
@@ -2826,7 +2843,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_req_node_new - search and possibly create a tt_req_node object
+ * batadv_tt_req_node_new() - search and possibly create a tt_req_node object
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node this request is being issued for
  *
@@ -2863,7 +2880,7 @@ unlock:
 }
 
 /**
- * batadv_tt_local_valid - verify that given tt entry is a valid one
+ * batadv_tt_local_valid() - verify that given tt entry is a valid one
  * @entry_ptr: to be checked local tt entry
  * @data_ptr: not used but definition required to satisfy the callback prototype
  *
@@ -2897,7 +2914,7 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
 }
 
 /**
- * batadv_tt_tvlv_generate - fill the tvlv buff with the tt entries from the
+ * batadv_tt_tvlv_generate() - fill the tvlv buff with the tt entries from the
  *  specified tt hash
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the tt entries
@@ -2948,7 +2965,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_check_crc - check if all the CRCs are correct
+ * batadv_tt_global_check_crc() - check if all the CRCs are correct
  * @orig_node: originator for which the CRCs have to be checked
  * @tt_vlan: pointer to the first tvlv VLAN entry
  * @num_vlan: number of tvlv VLAN entries
@@ -3005,7 +3022,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_local_update_crc - update all the local CRCs
+ * batadv_tt_local_update_crc() - update all the local CRCs
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
@@ -3021,7 +3038,7 @@ static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_update_crc - update all the global CRCs for this orig_node
+ * batadv_tt_global_update_crc() - update all the global CRCs for this orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig_node for which the CRCs have to be updated
  */
@@ -3048,7 +3065,7 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_tt_request - send a TT Request message to a given node
+ * batadv_send_tt_request() - send a TT Request message to a given node
  * @bat_priv: the bat priv with all the soft interface information
  * @dst_orig_node: the destination of the message
  * @ttvn: the version number that the source of the message is looking for
@@ -3137,7 +3154,7 @@ out:
 }
 
 /**
- * batadv_send_other_tt_response - send reply to tt request concerning another
+ * batadv_send_other_tt_response() - send reply to tt request concerning another
  *  node's translation table
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
@@ -3270,8 +3287,8 @@ out:
 }
 
 /**
- * batadv_send_my_tt_response - send reply to tt request concerning this node's
- *  translation table
+ * batadv_send_my_tt_response() - send reply to tt request concerning this
+ *  node's translation table
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @req_src: mac address of tt request sender
@@ -3388,7 +3405,7 @@ out:
 }
 
 /**
- * batadv_send_tt_response - send reply to tt request
+ * batadv_send_tt_response() - send reply to tt request
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @req_src: mac address of tt request sender
@@ -3484,7 +3501,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_is_my_client - check if a client is served by the local node
+ * batadv_is_my_client() - check if a client is served by the local node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
@@ -3514,7 +3531,7 @@ out:
 }
 
 /**
- * batadv_handle_tt_response - process incoming tt reply
+ * batadv_handle_tt_response() - process incoming tt reply
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @resp_src: mac address of tt reply sender
@@ -3607,7 +3624,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_check_roam_count - check if a client has roamed too frequently
+ * batadv_tt_check_roam_count() - check if a client has roamed too frequently
  * @bat_priv: the bat priv with all the soft interface information
  * @client: mac address of the roaming client
  *
@@ -3662,7 +3679,7 @@ unlock:
 }
 
 /**
- * batadv_send_roam_adv - send a roaming advertisement message
+ * batadv_send_roam_adv() - send a roaming advertisement message
  * @bat_priv: the bat priv with all the soft interface information
  * @client: mac address of the roaming client
  * @vid: VLAN identifier
@@ -3727,6 +3744,10 @@ static void batadv_tt_purge(struct work_struct *work)
                           msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
 }
 
+/**
+ * batadv_tt_free() - Free translation table of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
        batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
@@ -3744,7 +3765,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_set_flags - set or unset the specified flags on the local
+ * batadv_tt_local_set_flags() - set or unset the specified flags on the local
  *  table and possibly count them in the TT size
  * @bat_priv: the bat priv with all the soft interface information
  * @flags: the flag to switch
@@ -3830,7 +3851,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_commit_changes_nolock - commit all pending local tt changes
+ * batadv_tt_local_commit_changes_nolock() - commit all pending local tt changes
  *  which have been queued in the time since the last commit
  * @bat_priv: the bat priv with all the soft interface information
  *
@@ -3863,7 +3884,7 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_commit_changes - commit all pending local tt changes which
+ * batadv_tt_local_commit_changes() - commit all pending local tt changes which
  *  have been queued in the time since the last commit
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -3874,6 +3895,15 @@ void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
        spin_unlock_bh(&bat_priv->tt.commit_lock);
 }
 
+/**
+ * batadv_is_ap_isolated() - Check if packet from upper layer should be dropped
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: source mac address of packet
+ * @dst: destination mac address of packet
+ * @vid: vlan id of packet
+ *
+ * Return: true when src+dst(+vid) pair should be isolated, false otherwise
+ */
 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
                           unsigned short vid)
 {
@@ -3909,7 +3939,7 @@ vlan_put:
 }
 
 /**
- * batadv_tt_update_orig - update global translation table with new tt
+ * batadv_tt_update_orig() - update global translation table with new tt
  *  information received via ogms
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig_node of the ogm
@@ -3994,7 +4024,7 @@ request_table:
 }
 
 /**
- * batadv_tt_global_client_is_roaming - check if a client is marked as roaming
+ * batadv_tt_global_client_is_roaming() - check if a client is marked as roaming
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
@@ -4020,7 +4050,7 @@ out:
 }
 
 /**
- * batadv_tt_local_client_is_roaming - tells whether the client is roaming
+ * batadv_tt_local_client_is_roaming() - tells whether the client is roaming
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the local client to query
  * @vid: VLAN identifier
@@ -4045,6 +4075,15 @@ out:
        return ret;
 }
 
+/**
+ * batadv_tt_add_temporary_global_entry() - Add temporary entry to global TT
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which the temporary entry should be associated with
+ * @addr: mac address of the client
+ * @vid: VLAN id of the new temporary global translation table
+ *
+ * Return: true when temporary tt entry could be added, false otherwise
+ */
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig_node,
                                          const unsigned char *addr,
@@ -4069,7 +4108,7 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_resize_to_mtu - resize the local translation table fit the
+ * batadv_tt_local_resize_to_mtu() - resize the local translation table to fit
  *  maximum packet size that can be transported through the mesh
  * @soft_iface: netdev struct of the mesh interface
  *
@@ -4110,7 +4149,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
 }
 
 /**
- * batadv_tt_tvlv_ogm_handler_v1 - process incoming tt tvlv container
+ * batadv_tt_tvlv_ogm_handler_v1() - process incoming tt tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -4149,7 +4188,7 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_tvlv_unicast_handler_v1 - process incoming (unicast) tt tvlv
+ * batadv_tt_tvlv_unicast_handler_v1() - process incoming (unicast) tt tvlv
  *  container
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of tt tvlv sender
@@ -4231,7 +4270,8 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_roam_tvlv_unicast_handler_v1 - process incoming tt roam tvlv container
+ * batadv_roam_tvlv_unicast_handler_v1() - process incoming tt roam tvlv
+ *  container
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of tt tvlv sender
  * @dst: mac address of tt tvlv recipient
@@ -4281,7 +4321,7 @@ out:
 }
 
 /**
- * batadv_tt_init - initialise the translation table internals
+ * batadv_tt_init() - initialise the translation table internals
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure.
@@ -4317,7 +4357,7 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_is_isolated - check if a client is marked as isolated
+ * batadv_tt_global_is_isolated() - check if a client is marked as isolated
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client
  * @vid: the identifier of the VLAN where this client is connected
@@ -4343,7 +4383,7 @@ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_cache_init - Initialize tt memory object cache
+ * batadv_tt_cache_init() - Initialize tt memory object cache
  *
  * Return: 0 on success or negative error number in case of failure.
  */
@@ -4412,7 +4452,7 @@ err_tt_tl_destroy:
 }
 
 /**
- * batadv_tt_cache_destroy - Destroy tt memory object cache
+ * batadv_tt_cache_destroy() - Destroy tt memory object cache
  */
 void batadv_tt_cache_destroy(void)
 {
index 411d586..8d9e3ab 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
index 1d9e267..5ffcb45 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -19,7 +20,7 @@
 
 #include <linux/byteorder/generic.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "tvlv.h"
 
 /**
- * batadv_tvlv_handler_release - release tvlv handler from lists and queue for
+ * batadv_tvlv_handler_release() - release tvlv handler from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the tvlv
  */
@@ -55,7 +56,7 @@ static void batadv_tvlv_handler_release(struct kref *ref)
 }
 
 /**
- * batadv_tvlv_handler_put - decrement the tvlv container refcounter and
+ * batadv_tvlv_handler_put() - decrement the tvlv container refcounter and
  *  possibly release it
  * @tvlv_handler: the tvlv handler to free
  */
@@ -65,7 +66,7 @@ static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
 }
 
 /**
- * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
+ * batadv_tvlv_handler_get() - retrieve tvlv handler from the tvlv handler list
  *  based on the provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv handler type to look for
@@ -99,7 +100,7 @@ batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 }
 
 /**
- * batadv_tvlv_container_release - release tvlv from lists and free
+ * batadv_tvlv_container_release() - release tvlv from lists and free
  * @ref: kref pointer of the tvlv
  */
 static void batadv_tvlv_container_release(struct kref *ref)
@@ -111,7 +112,7 @@ static void batadv_tvlv_container_release(struct kref *ref)
 }
 
 /**
- * batadv_tvlv_container_put - decrement the tvlv container refcounter and
+ * batadv_tvlv_container_put() - decrement the tvlv container refcounter and
  *  possibly release it
  * @tvlv: the tvlv container to free
  */
@@ -121,7 +122,7 @@ static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
 }
 
 /**
- * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
+ * batadv_tvlv_container_get() - retrieve tvlv container from the tvlv container
  *  list based on the provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type to look for
@@ -155,7 +156,7 @@ batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 }
 
 /**
- * batadv_tvlv_container_list_size - calculate the size of the tvlv container
+ * batadv_tvlv_container_list_size() - calculate the size of the tvlv container
  *  list entries
  * @bat_priv: the bat priv with all the soft interface information
  *
@@ -180,8 +181,8 @@ static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
- *  list
+ * batadv_tvlv_container_remove() - remove tvlv container from the tvlv
+ *  container list
  * @bat_priv: the bat priv with all the soft interface information
  * @tvlv: the to be removed tvlv container
  *
@@ -204,7 +205,7 @@ static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_container_unregister - unregister tvlv container based on the
+ * batadv_tvlv_container_unregister() - unregister tvlv container based on the
  *  provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type to unregister
@@ -222,7 +223,7 @@ void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_container_register - register tvlv type, version and content
+ * batadv_tvlv_container_register() - register tvlv type, version and content
  *  to be propagated with each (primary interface) OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type
@@ -267,7 +268,7 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
+ * batadv_tvlv_realloc_packet_buff() - reallocate packet buffer to accommodate
  *  requested packet size
  * @packet_buff: packet buffer
  * @packet_buff_len: packet buffer size
@@ -300,7 +301,7 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
 }
 
 /**
- * batadv_tvlv_container_ogm_append - append tvlv container content to given
+ * batadv_tvlv_container_ogm_append() - append tvlv container content to given
  *  OGM packet buffer
  * @bat_priv: the bat priv with all the soft interface information
  * @packet_buff: ogm packet buffer
@@ -353,7 +354,7 @@ end:
 }
 
 /**
- * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
+ * batadv_tvlv_call_handler() - parse the given tvlv buffer to call the
  *  appropriate handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @tvlv_handler: tvlv callback function handling the tvlv content
@@ -407,7 +408,7 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
+ * batadv_tvlv_containers_process() - parse the given tvlv buffer to call the
  *  appropriate handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
@@ -474,7 +475,7 @@ int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
+ * batadv_tvlv_ogm_receive() - process an incoming ogm and call the appropriate
  *  handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @batadv_ogm_packet: ogm packet containing the tvlv containers
@@ -501,7 +502,7 @@ void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_handler_register - register tvlv handler based on the provided
+ * batadv_tvlv_handler_register() - register tvlv handler based on the provided
  *  type and version (both need to match) for ogm tvlv payload and/or unicast
  *  payload
  * @bat_priv: the bat priv with all the soft interface information
@@ -556,7 +557,7 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
+ * batadv_tvlv_handler_unregister() - unregister tvlv handler based on the
  *  provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv handler type to be unregistered
@@ -579,7 +580,7 @@ void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
+ * batadv_tvlv_unicast_send() - send a unicast packet with tvlv payload to the
  *  specified host
  * @bat_priv: the bat priv with all the soft interface information
  * @src: source mac address of the unicast packet
index 4d01400..a74df33 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
index a627958..bb15784 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
 #include <linux/types.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
-#include "packet.h"
-
 struct seq_file;
 
 #ifdef CONFIG_BATMAN_ADV_DAT
@@ -54,13 +54,15 @@ struct seq_file;
 
 /**
  * enum batadv_dhcp_recipient - dhcp destination
- * @BATADV_DHCP_NO: packet is not a dhcp message
- * @BATADV_DHCP_TO_SERVER: dhcp message is directed to a server
- * @BATADV_DHCP_TO_CLIENT: dhcp message is directed to a client
  */
 enum batadv_dhcp_recipient {
+       /** @BATADV_DHCP_NO: packet is not a dhcp message */
        BATADV_DHCP_NO = 0,
+
+       /** @BATADV_DHCP_TO_SERVER: dhcp message is directed to a server */
        BATADV_DHCP_TO_SERVER,
+
+       /** @BATADV_DHCP_TO_CLIENT: dhcp message is directed to a client */
        BATADV_DHCP_TO_CLIENT,
 };
 
@@ -78,196 +80,274 @@ enum batadv_dhcp_recipient {
 
 /**
  * struct batadv_hard_iface_bat_iv - per hard-interface B.A.T.M.A.N. IV data
- * @ogm_buff: buffer holding the OGM packet
- * @ogm_buff_len: length of the OGM packet buffer
- * @ogm_seqno: OGM sequence number - used to identify each OGM
  */
 struct batadv_hard_iface_bat_iv {
+       /** @ogm_buff: buffer holding the OGM packet */
        unsigned char *ogm_buff;
+
+       /** @ogm_buff_len: length of the OGM packet buffer */
        int ogm_buff_len;
+
+       /** @ogm_seqno: OGM sequence number - used to identify each OGM */
        atomic_t ogm_seqno;
 };
 
 /**
  * enum batadv_v_hard_iface_flags - interface flags useful to B.A.T.M.A.N. V
- * @BATADV_FULL_DUPLEX: tells if the connection over this link is full-duplex
- * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that no
- *  throughput data is available for this interface and that default values are
- *  assumed.
  */
 enum batadv_v_hard_iface_flags {
+       /**
+        * @BATADV_FULL_DUPLEX: tells if the connection over this link is
+        *  full-duplex
+        */
        BATADV_FULL_DUPLEX      = BIT(0),
+
+       /**
+        * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that
+        *  no throughput data is available for this interface and that default
+        *  values are assumed.
+        */
        BATADV_WARNING_DEFAULT  = BIT(1),
 };
 
 /**
  * struct batadv_hard_iface_bat_v - per hard-interface B.A.T.M.A.N. V data
- * @elp_interval: time interval between two ELP transmissions
- * @elp_seqno: current ELP sequence number
- * @elp_skb: base skb containing the ELP message to send
- * @elp_wq: workqueue used to schedule ELP transmissions
- * @throughput_override: throughput override to disable link auto-detection
- * @flags: interface specific flags
  */
 struct batadv_hard_iface_bat_v {
+       /** @elp_interval: time interval between two ELP transmissions */
        atomic_t elp_interval;
+
+       /** @elp_seqno: current ELP sequence number */
        atomic_t elp_seqno;
+
+       /** @elp_skb: base skb containing the ELP message to send */
        struct sk_buff *elp_skb;
+
+       /** @elp_wq: workqueue used to schedule ELP transmissions */
        struct delayed_work elp_wq;
+
+       /**
+        * @throughput_override: throughput override to disable link
+        *  auto-detection
+        */
        atomic_t throughput_override;
+
+       /** @flags: interface specific flags */
        u8 flags;
 };
 
 /**
  * enum batadv_hard_iface_wifi_flags - Flags describing the wifi configuration
  *  of a batadv_hard_iface
- * @BATADV_HARDIF_WIFI_WEXT_DIRECT: it is a wext wifi device
- * @BATADV_HARDIF_WIFI_CFG80211_DIRECT: it is a cfg80211 wifi device
- * @BATADV_HARDIF_WIFI_WEXT_INDIRECT: link device is a wext wifi device
- * @BATADV_HARDIF_WIFI_CFG80211_INDIRECT: link device is a cfg80211 wifi device
  */
 enum batadv_hard_iface_wifi_flags {
+       /** @BATADV_HARDIF_WIFI_WEXT_DIRECT: it is a wext wifi device */
        BATADV_HARDIF_WIFI_WEXT_DIRECT = BIT(0),
+
+       /** @BATADV_HARDIF_WIFI_CFG80211_DIRECT: it is a cfg80211 wifi device */
        BATADV_HARDIF_WIFI_CFG80211_DIRECT = BIT(1),
+
+       /**
+        * @BATADV_HARDIF_WIFI_WEXT_INDIRECT: link device is a wext wifi device
+        */
        BATADV_HARDIF_WIFI_WEXT_INDIRECT = BIT(2),
+
+       /**
+        * @BATADV_HARDIF_WIFI_CFG80211_INDIRECT: link device is a cfg80211 wifi
+        * device
+        */
        BATADV_HARDIF_WIFI_CFG80211_INDIRECT = BIT(3),
 };
 
 /**
  * struct batadv_hard_iface - network device known to batman-adv
- * @list: list node for batadv_hardif_list
- * @if_num: identificator of the interface
- * @if_status: status of the interface for batman-adv
- * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
- * @wifi_flags: flags whether this is (directly or indirectly) a wifi interface
- * @net_dev: pointer to the net_device
- * @hardif_obj: kobject of the per interface sysfs "mesh" directory
- * @refcount: number of contexts the object is used
- * @batman_adv_ptype: packet type describing packets that should be processed by
- *  batman-adv for this interface
- * @soft_iface: the batman-adv interface which uses this network interface
- * @rcu: struct used for freeing in an RCU-safe manner
- * @bat_iv: per hard-interface B.A.T.M.A.N. IV data
- * @bat_v: per hard-interface B.A.T.M.A.N. V data
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- * @neigh_list: list of unique single hop neighbors via this interface
- * @neigh_list_lock: lock protecting neigh_list
  */
 struct batadv_hard_iface {
+       /** @list: list node for batadv_hardif_list */
        struct list_head list;
+
+       /** @if_num: identifier of the interface */
        s16 if_num;
+
+       /** @if_status: status of the interface for batman-adv */
        char if_status;
+
+       /**
+        * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
+        */
        u8 num_bcasts;
+
+       /**
+        * @wifi_flags: flags whether this is (directly or indirectly) a wifi
+        *  interface
+        */
        u32 wifi_flags;
+
+       /** @net_dev: pointer to the net_device */
        struct net_device *net_dev;
+
+       /** @hardif_obj: kobject of the per interface sysfs "mesh" directory */
        struct kobject *hardif_obj;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /**
+        * @batman_adv_ptype: packet type describing packets that should be
+        * processed by batman-adv for this interface
+        */
        struct packet_type batman_adv_ptype;
+
+       /**
+        * @soft_iface: the batman-adv interface which uses this network
+        *  interface
+        */
        struct net_device *soft_iface;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
+
+       /** @bat_iv: per hard-interface B.A.T.M.A.N. IV data */
        struct batadv_hard_iface_bat_iv bat_iv;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+       /** @bat_v: per hard-interface B.A.T.M.A.N. V data */
        struct batadv_hard_iface_bat_v bat_v;
 #endif
+
+       /**
+        * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
+        */
        struct dentry *debug_dir;
+
+       /**
+        * @neigh_list: list of unique single hop neighbors via this interface
+        */
        struct hlist_head neigh_list;
-       /* neigh_list_lock protects: neigh_list */
+
+       /** @neigh_list_lock: lock protecting neigh_list */
        spinlock_t neigh_list_lock;
 };
 
 /**
  * struct batadv_orig_ifinfo - originator info per outgoing interface
- * @list: list node for orig_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard-interface
- * @router: router that should be used to reach this originator
- * @last_real_seqno: last and best known sequence number
- * @last_ttl: ttl of last received packet
- * @last_seqno_forwarded: seqno of the OGM which was forwarded last
- * @batman_seqno_reset: time when the batman seqno window was reset
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_orig_ifinfo {
+       /** @list: list node for &batadv_orig_node.ifinfo_list */
        struct hlist_node list;
+
+       /** @if_outgoing: pointer to outgoing hard-interface */
        struct batadv_hard_iface *if_outgoing;
-       struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
+
+       /** @router: router that should be used to reach this originator */
+       struct batadv_neigh_node __rcu *router;
+
+       /** @last_real_seqno: last and best known sequence number */
        u32 last_real_seqno;
+
+       /** @last_ttl: ttl of last received packet */
        u8 last_ttl;
+
+       /** @last_seqno_forwarded: seqno of the OGM which was forwarded last */
        u32 last_seqno_forwarded;
+
+       /** @batman_seqno_reset: time when the batman seqno window was reset */
        unsigned long batman_seqno_reset;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_frag_table_entry - head in the fragment buffer table
- * @fragment_list: head of list with fragments
- * @lock: lock to protect the list of fragments
- * @timestamp: time (jiffie) of last received fragment
- * @seqno: sequence number of the fragments in the list
- * @size: accumulated size of packets in list
- * @total_size: expected size of the assembled packet
  */
 struct batadv_frag_table_entry {
+       /** @fragment_list: head of list with fragments */
        struct hlist_head fragment_list;
-       spinlock_t lock; /* protects fragment_list */
+
+       /** @lock: lock to protect the list of fragments */
+       spinlock_t lock;
+
+       /** @timestamp: time (jiffie) of last received fragment */
        unsigned long timestamp;
+
+       /** @seqno: sequence number of the fragments in the list */
        u16 seqno;
+
+       /** @size: accumulated size of packets in list */
        u16 size;
+
+       /** @total_size: expected size of the assembled packet */
        u16 total_size;
 };
 
 /**
  * struct batadv_frag_list_entry - entry in a list of fragments
- * @list: list node information
- * @skb: fragment
- * @no: fragment number in the set
  */
 struct batadv_frag_list_entry {
+       /** @list: list node information */
        struct hlist_node list;
+
+       /** @skb: fragment */
        struct sk_buff *skb;
+
+       /** @no: fragment number in the set */
        u8 no;
 };
 
 /**
  * struct batadv_vlan_tt - VLAN specific TT attributes
- * @crc: CRC32 checksum of the entries belonging to this vlan
- * @num_entries: number of TT entries for this VLAN
  */
 struct batadv_vlan_tt {
+       /** @crc: CRC32 checksum of the entries belonging to this vlan */
        u32 crc;
+
+       /** @num_entries: number of TT entries for this VLAN */
        atomic_t num_entries;
 };
 
 /**
  * struct batadv_orig_node_vlan - VLAN specific data per orig_node
- * @vid: the VLAN identifier
- * @tt: VLAN specific TT attributes
- * @list: list node for orig_node::vlan_list
- * @refcount: number of context where this object is currently in use
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_orig_node_vlan {
+       /** @vid: the VLAN identifier */
        unsigned short vid;
+
+       /** @tt: VLAN specific TT attributes */
        struct batadv_vlan_tt tt;
+
+       /** @list: list node for &batadv_orig_node.vlan_list */
        struct hlist_node list;
+
+       /**
+        * @refcount: number of context where this object is currently in use
+        */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in a RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
- * @bcast_own: set of bitfields (one per hard-interface) where each one counts
- * the number of our OGMs this orig_node rebroadcasted "back" to us  (relative
- * to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
- * @bcast_own_sum: sum of bcast_own
- * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
- *  neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
  */
 struct batadv_orig_bat_iv {
+       /**
+        * @bcast_own: set of bitfields (one per hard-interface) where each one
+        * counts the number of our OGMs this orig_node rebroadcasted "back" to
+        * us  (relative to last_real_seqno). Every bitfield is
+        * BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
+        */
        unsigned long *bcast_own;
+
+       /** @bcast_own_sum: sum of bcast_own */
        u8 *bcast_own_sum;
-       /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
+
+       /**
+        * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
         * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
         */
        spinlock_t ogm_cnt_lock;
@@ -275,130 +355,205 @@ struct batadv_orig_bat_iv {
 
 /**
  * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
- * @orig: originator ethernet address
- * @ifinfo_list: list for routers per outgoing interface
- * @last_bonding_candidate: pointer to last ifinfo of last used router
- * @dat_addr: address of the orig node in the distributed hash
- * @last_seen: time when last packet from this node was received
- * @bcast_seqno_reset: time when the broadcast seqno window was reset
- * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
- * @mcast_flags: multicast flags announced by the orig node
- * @mcast_want_all_unsnoopables_node: a list node for the
- *  mcast.want_all_unsnoopables list
- * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4 list
- * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6 list
- * @capabilities: announced capabilities of this originator
- * @capa_initialized: bitfield to remember whether a capability was initialized
- * @last_ttvn: last seen translation table version number
- * @tt_buff: last tt changeset this node received from the orig node
- * @tt_buff_len: length of the last tt changeset this node received from the
- *  orig node
- * @tt_buff_lock: lock that protects tt_buff and tt_buff_len
- * @tt_lock: prevents from updating the table while reading it. Table update is
- *  made up by two operations (data structure update and metdata -CRC/TTVN-
- *  recalculation) and they have to be executed atomically in order to avoid
- *  another thread to read the table/metadata between those.
- * @bcast_bits: bitfield containing the info which payload broadcast originated
- *  from this orig node this host already has seen (relative to
- *  last_bcast_seqno)
- * @last_bcast_seqno: last broadcast sequence number received by this host
- * @neigh_list: list of potential next hop neighbor towards this orig node
- * @neigh_list_lock: lock protecting neigh_list and router
- * @hash_entry: hlist node for batadv_priv::orig_hash
- * @bat_priv: pointer to soft_iface this orig node belongs to
- * @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
- * @in_coding_list: list of nodes this orig can hear
- * @out_coding_list: list of nodes that can hear this orig
- * @in_coding_list_lock: protects in_coding_list
- * @out_coding_list_lock: protects out_coding_list
- * @fragments: array with heads for fragment chains
- * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by the
- *  originator represented by this object
- * @vlan_list_lock: lock protecting vlan_list
- * @bat_iv: B.A.T.M.A.N. IV private structure
  */
 struct batadv_orig_node {
+       /** @orig: originator ethernet address */
        u8 orig[ETH_ALEN];
+
+       /** @ifinfo_list: list for routers per outgoing interface */
        struct hlist_head ifinfo_list;
+
+       /**
+        * @last_bonding_candidate: pointer to last ifinfo of last used router
+        */
        struct batadv_orig_ifinfo *last_bonding_candidate;
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+       /** @dat_addr: address of the orig node in the distributed hash */
        batadv_dat_addr_t dat_addr;
 #endif
+
+       /** @last_seen: time when last packet from this node was received */
        unsigned long last_seen;
+
+       /**
+        * @bcast_seqno_reset: time when the broadcast seqno window was reset
+        */
        unsigned long bcast_seqno_reset;
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
-       /* synchronizes mcast tvlv specific orig changes */
+       /**
+        * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
+        */
        spinlock_t mcast_handler_lock;
+
+       /** @mcast_flags: multicast flags announced by the orig node */
        u8 mcast_flags;
+
+       /**
+        * @mcast_want_all_unsnoopables_node: a list node for the
+        *  mcast.want_all_unsnoopables list
+        */
        struct hlist_node mcast_want_all_unsnoopables_node;
+
+       /**
+        * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4
+        *  list
+        */
        struct hlist_node mcast_want_all_ipv4_node;
+       /**
+        * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6
+        *  list
+        */
        struct hlist_node mcast_want_all_ipv6_node;
 #endif
+
+       /** @capabilities: announced capabilities of this originator */
        unsigned long capabilities;
+
+       /**
+        * @capa_initialized: bitfield to remember whether a capability was
+        *  initialized
+        */
        unsigned long capa_initialized;
+
+       /** @last_ttvn: last seen translation table version number */
        atomic_t last_ttvn;
+
+       /** @tt_buff: last tt changeset this node received from the orig node */
        unsigned char *tt_buff;
+
+       /**
+        * @tt_buff_len: length of the last tt changeset this node received
+        *  from the orig node
+        */
        s16 tt_buff_len;
-       spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
-       /* prevents from changing the table while reading it */
+
+       /** @tt_buff_lock: lock that protects tt_buff and tt_buff_len */
+       spinlock_t tt_buff_lock;
+
+       /**
+        * @tt_lock: prevents from updating the table while reading it. Table
+        *  update is made up by two operations (data structure update and
+        *  metadata -CRC/TTVN- recalculation) and they have to be executed
+        *  atomically in order to avoid another thread to read the
+        *  table/metadata between those.
+        */
        spinlock_t tt_lock;
+
+       /**
+        * @bcast_bits: bitfield containing the info which payload broadcast
+        *  originated from this orig node this host already has seen (relative
+        *  to last_bcast_seqno)
+        */
        DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+       /**
+        * @last_bcast_seqno: last broadcast sequence number received by this
+        *  host
+        */
        u32 last_bcast_seqno;
+
+       /**
+        * @neigh_list: list of potential next hop neighbor towards this orig
+        *  node
+        */
        struct hlist_head neigh_list;
-       /* neigh_list_lock protects: neigh_list, ifinfo_list,
-        * last_bonding_candidate and router
+
+       /**
+        * @neigh_list_lock: lock protecting neigh_list, ifinfo_list,
+        *  last_bonding_candidate and router
         */
        spinlock_t neigh_list_lock;
+
+       /** @hash_entry: hlist node for &batadv_priv.orig_hash */
        struct hlist_node hash_entry;
+
+       /** @bat_priv: pointer to soft_iface this orig node belongs to */
        struct batadv_priv *bat_priv;
-       /* bcast_seqno_lock protects: bcast_bits & last_bcast_seqno */
+
+       /** @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno */
        spinlock_t bcast_seqno_lock;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
+
 #ifdef CONFIG_BATMAN_ADV_NC
+       /** @in_coding_list: list of nodes this orig can hear */
        struct list_head in_coding_list;
+
+       /** @out_coding_list: list of nodes that can hear this orig */
        struct list_head out_coding_list;
-       spinlock_t in_coding_list_lock; /* Protects in_coding_list */
-       spinlock_t out_coding_list_lock; /* Protects out_coding_list */
+
+       /** @in_coding_list_lock: protects in_coding_list */
+       spinlock_t in_coding_list_lock;
+
+       /** @out_coding_list_lock: protects out_coding_list */
+       spinlock_t out_coding_list_lock;
 #endif
+
+       /** @fragments: array with heads for fragment chains */
        struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
+
+       /**
+        * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by
+        *  the originator represented by this object
+        */
        struct hlist_head vlan_list;
-       spinlock_t vlan_list_lock; /* protects vlan_list */
+
+       /** @vlan_list_lock: lock protecting vlan_list */
+       spinlock_t vlan_list_lock;
+
+       /** @bat_iv: B.A.T.M.A.N. IV private structure */
        struct batadv_orig_bat_iv bat_iv;
 };
 
 /**
  * enum batadv_orig_capabilities - orig node capabilities
- * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table enabled
- * @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled
- * @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability
- * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
- *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
  */
 enum batadv_orig_capabilities {
+       /**
+        * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table
+        *  enabled
+        */
        BATADV_ORIG_CAPA_HAS_DAT,
+
+       /** @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled */
        BATADV_ORIG_CAPA_HAS_NC,
+
+       /** @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability */
        BATADV_ORIG_CAPA_HAS_TT,
+
+       /**
+        * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
+        *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
+        */
        BATADV_ORIG_CAPA_HAS_MCAST,
 };
 
 /**
  * struct batadv_gw_node - structure for orig nodes announcing gw capabilities
- * @list: list node for batadv_priv_gw::list
- * @orig_node: pointer to corresponding orig node
- * @bandwidth_down: advertised uplink download bandwidth
- * @bandwidth_up: advertised uplink upload bandwidth
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_gw_node {
+       /** @list: list node for &batadv_priv_gw.list */
        struct hlist_node list;
+
+       /** @orig_node: pointer to corresponding orig node */
        struct batadv_orig_node *orig_node;
+
+       /** @bandwidth_down: advertised uplink download bandwidth */
        u32 bandwidth_down;
+
+       /** @bandwidth_up: advertised uplink upload bandwidth */
        u32 bandwidth_up;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
@@ -407,118 +562,161 @@ DECLARE_EWMA(throughput, 10, 8)
 /**
  * struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor
  *  information
- * @throughput: ewma link throughput towards this neighbor
- * @elp_interval: time interval between two ELP transmissions
- * @elp_latest_seqno: latest and best known ELP sequence number
- * @last_unicast_tx: when the last unicast packet has been sent to this neighbor
- * @metric_work: work queue callback item for metric update
  */
 struct batadv_hardif_neigh_node_bat_v {
+       /** @throughput: ewma link throughput towards this neighbor */
        struct ewma_throughput throughput;
+
+       /** @elp_interval: time interval between two ELP transmissions */
        u32 elp_interval;
+
+       /** @elp_latest_seqno: latest and best known ELP sequence number */
        u32 elp_latest_seqno;
+
+       /**
+        * @last_unicast_tx: when the last unicast packet has been sent to this
+        *  neighbor
+        */
        unsigned long last_unicast_tx;
+
+       /** @metric_work: work queue callback item for metric update */
        struct work_struct metric_work;
 };
 
 /**
  * struct batadv_hardif_neigh_node - unique neighbor per hard-interface
- * @list: list node for batadv_hard_iface::neigh_list
- * @addr: the MAC address of the neighboring interface
- * @orig: the address of the originator this neighbor node belongs to
- * @if_incoming: pointer to incoming hard-interface
- * @last_seen: when last packet via this neighbor was received
- * @bat_v: B.A.T.M.A.N. V private data
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_hardif_neigh_node {
+       /** @list: list node for &batadv_hard_iface.neigh_list */
        struct hlist_node list;
+
+       /** @addr: the MAC address of the neighboring interface */
        u8 addr[ETH_ALEN];
+
+       /**
+        * @orig: the address of the originator this neighbor node belongs to
+        */
        u8 orig[ETH_ALEN];
+
+       /** @if_incoming: pointer to incoming hard-interface */
        struct batadv_hard_iface *if_incoming;
+
+       /** @last_seen: when last packet via this neighbor was received */
        unsigned long last_seen;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+       /** @bat_v: B.A.T.M.A.N. V private data */
        struct batadv_hardif_neigh_node_bat_v bat_v;
 #endif
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in a RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_neigh_node - structure for single hops neighbors
- * @list: list node for batadv_orig_node::neigh_list
- * @orig_node: pointer to corresponding orig_node
- * @addr: the MAC address of the neighboring interface
- * @ifinfo_list: list for routing metrics per outgoing interface
- * @ifinfo_lock: lock protecting private ifinfo members and list
- * @if_incoming: pointer to incoming hard-interface
- * @last_seen: when last packet via this neighbor was received
- * @hardif_neigh: hardif_neigh of this neighbor
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_neigh_node {
+       /** @list: list node for &batadv_orig_node.neigh_list */
        struct hlist_node list;
+
+       /** @orig_node: pointer to corresponding orig_node */
        struct batadv_orig_node *orig_node;
+
+       /** @addr: the MAC address of the neighboring interface */
        u8 addr[ETH_ALEN];
+
+       /** @ifinfo_list: list for routing metrics per outgoing interface */
        struct hlist_head ifinfo_list;
-       spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */
+
+       /** @ifinfo_lock: lock protecting ifinfo_list and its members */
+       spinlock_t ifinfo_lock;
+
+       /** @if_incoming: pointer to incoming hard-interface */
        struct batadv_hard_iface *if_incoming;
+
+       /** @last_seen: when last packet via this neighbor was received */
        unsigned long last_seen;
+
+       /** @hardif_neigh: hardif_neigh of this neighbor */
        struct batadv_hardif_neigh_node *hardif_neigh;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_neigh_ifinfo_bat_iv - neighbor information per outgoing
  *  interface for B.A.T.M.A.N. IV
- * @tq_recv: ring buffer of received TQ values from this neigh node
- * @tq_index: ring buffer index
- * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
- * @real_bits: bitfield containing the number of OGMs received from this neigh
- *  node (relative to orig_node->last_real_seqno)
- * @real_packet_count: counted result of real_bits
  */
 struct batadv_neigh_ifinfo_bat_iv {
+       /** @tq_recv: ring buffer of received TQ values from this neigh node */
        u8 tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+
+       /** @tq_index: ring buffer index */
        u8 tq_index;
+
+       /**
+        * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
+        */
        u8 tq_avg;
+
+       /**
+        * @real_bits: bitfield containing the number of OGMs received from this
+        *  neigh node (relative to orig_node->last_real_seqno)
+        */
        DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+       /** @real_packet_count: counted result of real_bits */
        u8 real_packet_count;
 };
 
 /**
  * struct batadv_neigh_ifinfo_bat_v - neighbor information per outgoing
  *  interface for B.A.T.M.A.N. V
- * @throughput: last throughput metric received from originator via this neigh
- * @last_seqno: last sequence number known for this neighbor
  */
 struct batadv_neigh_ifinfo_bat_v {
+       /**
+        * @throughput: last throughput metric received from originator via this
+        *  neigh
+        */
        u32 throughput;
+
+       /** @last_seqno: last sequence number known for this neighbor */
        u32 last_seqno;
 };
 
 /**
  * struct batadv_neigh_ifinfo - neighbor information per outgoing interface
- * @list: list node for batadv_neigh_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard-interface
- * @bat_iv: B.A.T.M.A.N. IV private structure
- * @bat_v: B.A.T.M.A.N. V private data
- * @last_ttl: last received ttl from this neigh node
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_neigh_ifinfo {
+       /** @list: list node for &batadv_neigh_node.ifinfo_list */
        struct hlist_node list;
+
+       /** @if_outgoing: pointer to outgoing hard-interface */
        struct batadv_hard_iface *if_outgoing;
+
+       /** @bat_iv: B.A.T.M.A.N. IV private structure */
        struct batadv_neigh_ifinfo_bat_iv bat_iv;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+       /** @bat_v: B.A.T.M.A.N. V private data */
        struct batadv_neigh_ifinfo_bat_v bat_v;
 #endif
+
+       /** @last_ttl: last received ttl from this neigh node */
        u8 last_ttl;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in a RCU-safe manner */
        struct rcu_head rcu;
 };
 
@@ -526,148 +724,278 @@ struct batadv_neigh_ifinfo {
 
 /**
  * struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression
- * @orig: mac address of orig node orginating the broadcast
- * @crc: crc32 checksum of broadcast payload
- * @entrytime: time when the broadcast packet was received
  */
 struct batadv_bcast_duplist_entry {
+       /** @orig: mac address of orig node originating the broadcast */
        u8 orig[ETH_ALEN];
+
+       /** @crc: crc32 checksum of broadcast payload */
        __be32 crc;
+
+       /** @entrytime: time when the broadcast packet was received */
        unsigned long entrytime;
 };
 #endif
 
 /**
  * enum batadv_counters - indices for traffic counters
- * @BATADV_CNT_TX: transmitted payload traffic packet counter
- * @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter
- * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet counter
- * @BATADV_CNT_RX: received payload traffic packet counter
- * @BATADV_CNT_RX_BYTES: received payload traffic bytes counter
- * @BATADV_CNT_FORWARD: forwarded payload traffic packet counter
- * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
- * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet counter
- * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter
- * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
- * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter
- * @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter
- * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
- * @BATADV_CNT_FRAG_RX: received fragment traffic packet counter
- * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
- * @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter
- * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
- * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
- * @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter
- * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter
- * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
- * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet counter
- * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
- * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
- * @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter
- * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
- * @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter
- * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic packet
- *  counter
- * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
- * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes counter
- * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet counter
- * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes counter
- * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc decoding
- * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
- * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes counter
- * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic packet
- *  counter
- * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in promisc
- *  mode.
- * @BATADV_CNT_NUM: number of traffic counters
  */
 enum batadv_counters {
+       /** @BATADV_CNT_TX: transmitted payload traffic packet counter */
        BATADV_CNT_TX,
+
+       /** @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter */
        BATADV_CNT_TX_BYTES,
+
+       /**
+        * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet
+        *  counter
+        */
        BATADV_CNT_TX_DROPPED,
+
+       /** @BATADV_CNT_RX: received payload traffic packet counter */
        BATADV_CNT_RX,
+
+       /** @BATADV_CNT_RX_BYTES: received payload traffic bytes counter */
        BATADV_CNT_RX_BYTES,
+
+       /** @BATADV_CNT_FORWARD: forwarded payload traffic packet counter */
        BATADV_CNT_FORWARD,
+
+       /**
+        * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
+        */
        BATADV_CNT_FORWARD_BYTES,
+
+       /**
+        * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet
+        *  counter
+        */
        BATADV_CNT_MGMT_TX,
+
+       /**
+        * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes
+        *  counter
+        */
        BATADV_CNT_MGMT_TX_BYTES,
+
+       /**
+        * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
+        */
        BATADV_CNT_MGMT_RX,
+
+       /**
+        * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes
+        *  counter
+        */
        BATADV_CNT_MGMT_RX_BYTES,
+
+       /** @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter */
        BATADV_CNT_FRAG_TX,
+
+       /**
+        * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
+        */
        BATADV_CNT_FRAG_TX_BYTES,
+
+       /** @BATADV_CNT_FRAG_RX: received fragment traffic packet counter */
        BATADV_CNT_FRAG_RX,
+
+       /**
+        * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
+        */
        BATADV_CNT_FRAG_RX_BYTES,
+
+       /** @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter */
        BATADV_CNT_FRAG_FWD,
+
+       /**
+        * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
+        */
        BATADV_CNT_FRAG_FWD_BYTES,
+
+       /**
+        * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
+        */
        BATADV_CNT_TT_REQUEST_TX,
+
+       /** @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter */
        BATADV_CNT_TT_REQUEST_RX,
+
+       /**
+        * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet
+        *  counter
+        */
        BATADV_CNT_TT_RESPONSE_TX,
+
+       /**
+        * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
+        */
        BATADV_CNT_TT_RESPONSE_RX,
+
+       /**
+        * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet
+        *  counter
+        */
        BATADV_CNT_TT_ROAM_ADV_TX,
+
+       /**
+        * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
+        */
        BATADV_CNT_TT_ROAM_ADV_RX,
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+       /**
+        * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
+        */
        BATADV_CNT_DAT_GET_TX,
+
+       /** @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter */
        BATADV_CNT_DAT_GET_RX,
+
+       /**
+        * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
+        */
        BATADV_CNT_DAT_PUT_TX,
+
+       /** @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter */
        BATADV_CNT_DAT_PUT_RX,
+
+       /**
+        * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic
+        *  packet counter
+        */
        BATADV_CNT_DAT_CACHED_REPLY_TX,
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_NC
+       /**
+        * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
+        */
        BATADV_CNT_NC_CODE,
+
+       /**
+        * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes
+        *  counter
+        */
        BATADV_CNT_NC_CODE_BYTES,
+
+       /**
+        * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet
+        *  counter
+        */
        BATADV_CNT_NC_RECODE,
+
+       /**
+        * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes
+        *  counter
+        */
        BATADV_CNT_NC_RECODE_BYTES,
+
+       /**
+        * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc
+        *  decoding
+        */
        BATADV_CNT_NC_BUFFER,
+
+       /**
+        * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
+        */
        BATADV_CNT_NC_DECODE,
+
+       /**
+        * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes
+        *  counter
+        */
        BATADV_CNT_NC_DECODE_BYTES,
+
+       /**
+        * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic
+        *  packet counter
+        */
        BATADV_CNT_NC_DECODE_FAILED,
+
+       /**
+        * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in
+        *  promisc mode.
+        */
        BATADV_CNT_NC_SNIFFED,
 #endif
+
+       /** @BATADV_CNT_NUM: number of traffic counters */
        BATADV_CNT_NUM,
 };
 
 /**
  * struct batadv_priv_tt - per mesh interface translation table data
- * @vn: translation table version number
- * @ogm_append_cnt: counter of number of OGMs containing the local tt diff
- * @local_changes: changes registered in an originator interval
- * @changes_list: tracks tt local changes within an originator interval
- * @local_hash: local translation table hash table
- * @global_hash: global translation table hash table
- * @req_list: list of pending & unanswered tt_requests
- * @roam_list: list of the last roaming events of each client limiting the
- *  number of roaming events to avoid route flapping
- * @changes_list_lock: lock protecting changes_list
- * @req_list_lock: lock protecting req_list
- * @roam_list_lock: lock protecting roam_list
- * @last_changeset: last tt changeset this host has generated
- * @last_changeset_len: length of last tt changeset this host has generated
- * @last_changeset_lock: lock protecting last_changeset & last_changeset_len
- * @commit_lock: prevents from executing a local TT commit while reading the
- *  local table. The local TT commit is made up by two operations (data
- *  structure update and metdata -CRC/TTVN- recalculation) and they have to be
- *  executed atomically in order to avoid another thread to read the
- *  table/metadata between those.
- * @work: work queue callback item for translation table purging
  */
 struct batadv_priv_tt {
+       /** @vn: translation table version number */
        atomic_t vn;
+
+       /**
+        * @ogm_append_cnt: counter of number of OGMs containing the local tt
+        *  diff
+        */
        atomic_t ogm_append_cnt;
+
+       /** @local_changes: changes registered in an originator interval */
        atomic_t local_changes;
+
+       /**
+        * @changes_list: tracks tt local changes within an originator interval
+        */
        struct list_head changes_list;
+
+       /** @local_hash: local translation table hash table */
        struct batadv_hashtable *local_hash;
+
+       /** @global_hash: global translation table hash table */
        struct batadv_hashtable *global_hash;
+
+       /** @req_list: list of pending & unanswered tt_requests */
        struct hlist_head req_list;
+
+       /**
+        * @roam_list: list of the last roaming events of each client limiting
+        *  the number of roaming events to avoid route flapping
+        */
        struct list_head roam_list;
-       spinlock_t changes_list_lock; /* protects changes */
-       spinlock_t req_list_lock; /* protects req_list */
-       spinlock_t roam_list_lock; /* protects roam_list */
+
+       /** @changes_list_lock: lock protecting changes_list */
+       spinlock_t changes_list_lock;
+
+       /** @req_list_lock: lock protecting req_list */
+       spinlock_t req_list_lock;
+
+       /** @roam_list_lock: lock protecting roam_list */
+       spinlock_t roam_list_lock;
+
+       /** @last_changeset: last tt changeset this host has generated */
        unsigned char *last_changeset;
+
+       /**
+        * @last_changeset_len: length of last tt changeset this host has
+        *  generated
+        */
        s16 last_changeset_len;
-       /* protects last_changeset & last_changeset_len */
+
+       /**
+        * @last_changeset_lock: lock protecting last_changeset &
+        *  last_changeset_len
+        */
        spinlock_t last_changeset_lock;
-       /* prevents from executing a commit while reading the table */
+
+       /**
+        * @commit_lock: prevents from executing a local TT commit while reading
+        *  the local table. The local TT commit is made up by two operations
+        *  (data structure update and metadata -CRC/TTVN- recalculation) and
+        *  they have to be executed atomically in order to avoid another thread
+        *  to read the table/metadata between those.
+        */
        spinlock_t commit_lock;
+
+       /** @work: work queue callback item for translation table purging */
        struct delayed_work work;
 };
 
@@ -675,31 +1003,57 @@ struct batadv_priv_tt {
 
 /**
  * struct batadv_priv_bla - per mesh interface bridge loope avoidance data
- * @num_requests: number of bla requests in flight
- * @claim_hash: hash table containing mesh nodes this host has claimed
- * @backbone_hash: hash table containing all detected backbone gateways
- * @loopdetect_addr: MAC address used for own loopdetection frames
- * @loopdetect_lasttime: time when the loopdetection frames were sent
- * @loopdetect_next: how many periods to wait for the next loopdetect process
- * @bcast_duplist: recently received broadcast packets array (for broadcast
- *  duplicate suppression)
- * @bcast_duplist_curr: index of last broadcast packet added to bcast_duplist
- * @bcast_duplist_lock: lock protecting bcast_duplist & bcast_duplist_curr
- * @claim_dest: local claim data (e.g. claim group)
- * @work: work queue callback item for cleanups & bla announcements
  */
 struct batadv_priv_bla {
+       /** @num_requests: number of bla requests in flight */
        atomic_t num_requests;
+
+       /**
+        * @claim_hash: hash table containing mesh nodes this host has claimed
+        */
        struct batadv_hashtable *claim_hash;
+
+       /**
+        * @backbone_hash: hash table containing all detected backbone gateways
+        */
        struct batadv_hashtable *backbone_hash;
+
+       /** @loopdetect_addr: MAC address used for own loopdetection frames */
        u8 loopdetect_addr[ETH_ALEN];
+
+       /**
+        * @loopdetect_lasttime: time when the loopdetection frames were sent
+        */
        unsigned long loopdetect_lasttime;
+
+       /**
+        * @loopdetect_next: how many periods to wait for the next loopdetect
+        *  process
+        */
        atomic_t loopdetect_next;
+
+       /**
+        * @bcast_duplist: recently received broadcast packets array (for
+        *  broadcast duplicate suppression)
+        */
        struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
+
+       /**
+        * @bcast_duplist_curr: index of last broadcast packet added to
+        *  bcast_duplist
+        */
        int bcast_duplist_curr;
-       /* protects bcast_duplist & bcast_duplist_curr */
+
+       /**
+        * @bcast_duplist_lock: lock protecting bcast_duplist &
+        *  bcast_duplist_curr
+        */
        spinlock_t bcast_duplist_lock;
+
+       /** @claim_dest: local claim data (e.g. claim group) */
        struct batadv_bla_claim_dst claim_dest;
+
+       /** @work: work queue callback item for cleanups & bla announcements */
        struct delayed_work work;
 };
 #endif
@@ -708,68 +1062,94 @@ struct batadv_priv_bla {
 
 /**
  * struct batadv_priv_debug_log - debug logging data
- * @log_buff: buffer holding the logs (ring bufer)
- * @log_start: index of next character to read
- * @log_end: index of next character to write
- * @lock: lock protecting log_buff, log_start & log_end
- * @queue_wait: log reader's wait queue
  */
 struct batadv_priv_debug_log {
+       /** @log_buff: buffer holding the logs (ring buffer) */
        char log_buff[BATADV_LOG_BUF_LEN];
+
+       /** @log_start: index of next character to read */
        unsigned long log_start;
+
+       /** @log_end: index of next character to write */
        unsigned long log_end;
-       spinlock_t lock; /* protects log_buff, log_start and log_end */
+
+       /** @lock: lock protecting log_buff, log_start & log_end */
+       spinlock_t lock;
+
+       /** @queue_wait: log reader's wait queue */
        wait_queue_head_t queue_wait;
 };
 #endif
 
 /**
  * struct batadv_priv_gw - per mesh interface gateway data
- * @gateway_list: list of available gateway nodes
- * @list_lock: lock protecting gateway_list & curr_gw
- * @curr_gw: pointer to currently selected gateway node
- * @mode: gateway operation: off, client or server (see batadv_gw_modes)
- * @sel_class: gateway selection class (applies if gw_mode client)
- * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server)
- * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
- * @reselect: bool indicating a gateway re-selection is in progress
  */
 struct batadv_priv_gw {
+       /** @gateway_list: list of available gateway nodes */
        struct hlist_head gateway_list;
-       spinlock_t list_lock; /* protects gateway_list & curr_gw */
-       struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
+
+       /** @list_lock: lock protecting gateway_list & curr_gw */
+       spinlock_t list_lock;
+
+       /** @curr_gw: pointer to currently selected gateway node */
+       struct batadv_gw_node __rcu *curr_gw;
+
+       /**
+        * @mode: gateway operation: off, client or server (see batadv_gw_modes)
+        */
        atomic_t mode;
+
+       /** @sel_class: gateway selection class (applies if gw_mode client) */
        atomic_t sel_class;
+
+       /**
+        * @bandwidth_down: advertised uplink download bandwidth (if gw_mode
+        *  server)
+        */
        atomic_t bandwidth_down;
+
+       /**
+        * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
+        */
        atomic_t bandwidth_up;
+
+       /** @reselect: bool indicating a gateway re-selection is in progress */
        atomic_t reselect;
 };
 
 /**
  * struct batadv_priv_tvlv - per mesh interface tvlv data
- * @container_list: list of registered tvlv containers to be sent with each OGM
- * @handler_list: list of the various tvlv content handlers
- * @container_list_lock: protects tvlv container list access
- * @handler_list_lock: protects handler list access
  */
 struct batadv_priv_tvlv {
+       /**
+        * @container_list: list of registered tvlv containers to be sent with
+        *  each OGM
+        */
        struct hlist_head container_list;
+
+       /** @handler_list: list of the various tvlv content handlers */
        struct hlist_head handler_list;
-       spinlock_t container_list_lock; /* protects container_list */
-       spinlock_t handler_list_lock; /* protects handler_list */
+
+       /** @container_list_lock: protects tvlv container list access */
+       spinlock_t container_list_lock;
+
+       /** @handler_list_lock: protects handler list access */
+       spinlock_t handler_list_lock;
 };
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 
 /**
  * struct batadv_priv_dat - per mesh interface DAT private data
- * @addr: node DAT address
- * @hash: hashtable representing the local ARP cache
- * @work: work queue callback item for cache purging
  */
 struct batadv_priv_dat {
+       /** @addr: node DAT address */
        batadv_dat_addr_t addr;
+
+       /** @hash: hashtable representing the local ARP cache */
        struct batadv_hashtable *hash;
+
+       /** @work: work queue callback item for cache purging */
        struct delayed_work work;
 };
 #endif
@@ -777,375 +1157,582 @@ struct batadv_priv_dat {
 #ifdef CONFIG_BATMAN_ADV_MCAST
 /**
  * struct batadv_mcast_querier_state - IGMP/MLD querier state when bridged
- * @exists: whether a querier exists in the mesh
- * @shadowing: if a querier exists, whether it is potentially shadowing
- *  multicast listeners (i.e. querier is behind our own bridge segment)
  */
 struct batadv_mcast_querier_state {
+       /** @exists: whether a querier exists in the mesh */
        bool exists;
+
+       /**
+        * @shadowing: if a querier exists, whether it is potentially shadowing
+        *  multicast listeners (i.e. querier is behind our own bridge segment)
+        */
        bool shadowing;
 };
 
 /**
  * struct batadv_priv_mcast - per mesh interface mcast data
- * @mla_list: list of multicast addresses we are currently announcing via TT
- * @want_all_unsnoopables_list: a list of orig_nodes wanting all unsnoopable
- *  multicast traffic
- * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast traffic
- * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast traffic
- * @querier_ipv4: the current state of an IGMP querier in the mesh
- * @querier_ipv6: the current state of an MLD querier in the mesh
- * @flags: the flags we have last sent in our mcast tvlv
- * @enabled: whether the multicast tvlv is currently enabled
- * @bridged: whether the soft interface has a bridge on top
- * @num_disabled: number of nodes that have no mcast tvlv
- * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP traffic
- * @num_want_all_ipv4: counter for items in want_all_ipv4_list
- * @num_want_all_ipv6: counter for items in want_all_ipv6_list
- * @want_lists_lock: lock for protecting modifications to mcast want lists
- *  (traversals are rcu-locked)
- * @work: work queue callback item for multicast TT and TVLV updates
  */
 struct batadv_priv_mcast {
+       /**
+        * @mla_list: list of multicast addresses we are currently announcing
+        *  via TT
+        */
        struct hlist_head mla_list; /* see __batadv_mcast_mla_update() */
+
+       /**
+        * @want_all_unsnoopables_list: a list of orig_nodes wanting all
+        *  unsnoopable multicast traffic
+        */
        struct hlist_head want_all_unsnoopables_list;
+
+       /**
+        * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast
+        *  traffic
+        */
        struct hlist_head want_all_ipv4_list;
+
+       /**
+        * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast
+        *  traffic
+        */
        struct hlist_head want_all_ipv6_list;
+
+       /** @querier_ipv4: the current state of an IGMP querier in the mesh */
        struct batadv_mcast_querier_state querier_ipv4;
+
+       /** @querier_ipv6: the current state of an MLD querier in the mesh */
        struct batadv_mcast_querier_state querier_ipv6;
+
+       /** @flags: the flags we have last sent in our mcast tvlv */
        u8 flags;
+
+       /** @enabled: whether the multicast tvlv is currently enabled */
        bool enabled;
+
+       /** @bridged: whether the soft interface has a bridge on top */
        bool bridged;
+
+       /** @num_disabled: number of nodes that have no mcast tvlv */
        atomic_t num_disabled;
+
+       /**
+        * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP
+        *  traffic
+        */
        atomic_t num_want_all_unsnoopables;
+
+       /** @num_want_all_ipv4: counter for items in want_all_ipv4_list */
        atomic_t num_want_all_ipv4;
+
+       /** @num_want_all_ipv6: counter for items in want_all_ipv6_list */
        atomic_t num_want_all_ipv6;
-       /* protects want_all_{unsnoopables,ipv4,ipv6}_list */
+
+       /**
+        * @want_lists_lock: lock for protecting modifications to mcasts
+        *  want_all_{unsnoopables,ipv4,ipv6}_list (traversals are rcu-locked)
+        */
        spinlock_t want_lists_lock;
+
+       /** @work: work queue callback item for multicast TT and TVLV updates */
        struct delayed_work work;
 };
 #endif
 
 /**
  * struct batadv_priv_nc - per mesh interface network coding private data
- * @work: work queue callback item for cleanup
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
- * @max_fwd_delay: maximum packet forward delay to allow coding of packets
- * @max_buffer_time: buffer time for sniffed packets used to decoding
- * @timestamp_fwd_flush: timestamp of last forward packet queue flush
- * @timestamp_sniffed_purge: timestamp of last sniffed packet queue purge
- * @coding_hash: Hash table used to buffer skbs while waiting for another
- *  incoming skb to code it with. Skbs are added to the buffer just before being
- *  forwarded in routing.c
- * @decoding_hash: Hash table used to buffer skbs that might be needed to decode
- *  a received coded skb. The buffer is used for 1) skbs arriving on the
- *  soft-interface; 2) skbs overheard on the hard-interface; and 3) skbs
- *  forwarded by batman-adv.
  */
 struct batadv_priv_nc {
+       /** @work: work queue callback item for cleanup */
        struct delayed_work work;
+
+       /**
+        * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
+        */
        struct dentry *debug_dir;
+
+       /**
+        * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
+        */
        u8 min_tq;
+
+       /**
+        * @max_fwd_delay: maximum packet forward delay to allow coding of
+        *  packets
+        */
        u32 max_fwd_delay;
+
+       /**
+        * @max_buffer_time: buffer time for sniffed packets used for decoding
+        */
        u32 max_buffer_time;
+
+       /**
+        * @timestamp_fwd_flush: timestamp of last forward packet queue flush
+        */
        unsigned long timestamp_fwd_flush;
+
+       /**
+        * @timestamp_sniffed_purge: timestamp of last sniffed packet queue
+        *  purge
+        */
        unsigned long timestamp_sniffed_purge;
+
+       /**
+        * @coding_hash: Hash table used to buffer skbs while waiting for
+        *  another incoming skb to code it with. Skbs are added to the buffer
+        *  just before being forwarded in routing.c
+        */
        struct batadv_hashtable *coding_hash;
+
+       /**
+        * @decoding_hash: Hash table used to buffer skbs that might be needed
+        *  to decode a received coded skb. The buffer is used for 1) skbs
+        *  arriving on the soft-interface; 2) skbs overheard on the
+        *  hard-interface; and 3) skbs forwarded by batman-adv.
+        */
        struct batadv_hashtable *decoding_hash;
 };
 
 /**
  * struct batadv_tp_unacked - unacked packet meta-information
- * @seqno: seqno of the unacked packet
- * @len: length of the packet
- * @list: list node for batadv_tp_vars::unacked_list
  *
  * This struct is supposed to represent a buffer unacked packet. However, since
  * the purpose of the TP meter is to count the traffic only, there is no need to
  * store the entire sk_buff, the starting offset and the length are enough
  */
 struct batadv_tp_unacked {
+       /** @seqno: seqno of the unacked packet */
        u32 seqno;
+
+       /** @len: length of the packet */
        u16 len;
+
+       /** @list: list node for &batadv_tp_vars.unacked_list */
        struct list_head list;
 };
 
 /**
  * enum batadv_tp_meter_role - Modus in tp meter session
- * @BATADV_TP_RECEIVER: Initialized as receiver
- * @BATADV_TP_SENDER: Initialized as sender
  */
 enum batadv_tp_meter_role {
+       /** @BATADV_TP_RECEIVER: Initialized as receiver */
        BATADV_TP_RECEIVER,
+
+       /** @BATADV_TP_SENDER: Initialized as sender */
        BATADV_TP_SENDER
 };
 
 /**
  * struct batadv_tp_vars - tp meter private variables per session
- * @list: list node for bat_priv::tp_list
- * @timer: timer for ack (receiver) and retry (sender)
- * @bat_priv: pointer to the mesh object
- * @start_time: start time in jiffies
- * @other_end: mac address of remote
- * @role: receiver/sender modi
- * @sending: sending binary semaphore: 1 if sending, 0 is not
- * @reason: reason for a stopped session
- * @finish_work: work item for the finishing procedure
- * @test_length: test length in milliseconds
- * @session: TP session identifier
- * @icmp_uid: local ICMP "socket" index
- * @dec_cwnd: decimal part of the cwnd used during linear growth
- * @cwnd: current size of the congestion window
- * @cwnd_lock: lock do protect @cwnd & @dec_cwnd
- * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
- *  connection switches to the Congestion Avoidance state
- * @last_acked: last acked byte
- * @last_sent: last sent byte, not yet acked
- * @tot_sent: amount of data sent/ACKed so far
- * @dup_acks: duplicate ACKs counter
- * @fast_recovery: true if in Fast Recovery mode
- * @recover: last sent seqno when entering Fast Recovery
- * @rto: sender timeout
- * @srtt: smoothed RTT scaled by 2^3
- * @rttvar: RTT variation scaled by 2^2
- * @more_bytes: waiting queue anchor when waiting for more ack/retry timeout
- * @prerandom_offset: offset inside the prerandom buffer
- * @prerandom_lock: spinlock protecting access to prerandom_offset
- * @last_recv: last in-order received packet
- * @unacked_list: list of unacked packets (meta-info only)
- * @unacked_lock: protect unacked_list
- * @last_recv_time: time time (jiffies) a msg was received
- * @refcount: number of context where the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tp_vars {
+       /** @list: list node for &bat_priv.tp_list */
        struct hlist_node list;
+
+       /** @timer: timer for ack (receiver) and retry (sender) */
        struct timer_list timer;
+
+       /** @bat_priv: pointer to the mesh object */
        struct batadv_priv *bat_priv;
+
+       /** @start_time: start time in jiffies */
        unsigned long start_time;
+
+       /** @other_end: mac address of remote */
        u8 other_end[ETH_ALEN];
+
+       /** @role: receiver/sender modi */
        enum batadv_tp_meter_role role;
+
+       /** @sending: sending binary semaphore: 1 if sending, 0 is not */
        atomic_t sending;
+
+       /** @reason: reason for a stopped session */
        enum batadv_tp_meter_reason reason;
+
+       /** @finish_work: work item for the finishing procedure */
        struct delayed_work finish_work;
+
+       /** @test_length: test length in milliseconds */
        u32 test_length;
+
+       /** @session: TP session identifier */
        u8 session[2];
+
+       /** @icmp_uid: local ICMP "socket" index */
        u8 icmp_uid;
 
        /* sender variables */
+
+       /** @dec_cwnd: decimal part of the cwnd used during linear growth */
        u16 dec_cwnd;
+
+       /** @cwnd: current size of the congestion window */
        u32 cwnd;
-       spinlock_t cwnd_lock; /* Protects cwnd & dec_cwnd */
+
+       /** @cwnd_lock: lock to protect @cwnd & @dec_cwnd */
+       spinlock_t cwnd_lock;
+
+       /**
+        * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
+        *  connection switches to the Congestion Avoidance state
+        */
        u32 ss_threshold;
+
+       /** @last_acked: last acked byte */
        atomic_t last_acked;
+
+       /** @last_sent: last sent byte, not yet acked */
        u32 last_sent;
+
+       /** @tot_sent: amount of data sent/ACKed so far */
        atomic64_t tot_sent;
+
+       /** @dup_acks: duplicate ACKs counter */
        atomic_t dup_acks;
+
+       /** @fast_recovery: true if in Fast Recovery mode */
        bool fast_recovery;
+
+       /** @recover: last sent seqno when entering Fast Recovery */
        u32 recover;
+
+       /** @rto: sender timeout */
        u32 rto;
+
+       /** @srtt: smoothed RTT scaled by 2^3 */
        u32 srtt;
+
+       /** @rttvar: RTT variation scaled by 2^2 */
        u32 rttvar;
+
+       /**
+        * @more_bytes: waiting queue anchor when waiting for more ack/retry
+        *  timeout
+        */
        wait_queue_head_t more_bytes;
+
+       /** @prerandom_offset: offset inside the prerandom buffer */
        u32 prerandom_offset;
-       spinlock_t prerandom_lock; /* Protects prerandom_offset */
+
+       /** @prerandom_lock: spinlock protecting access to prerandom_offset */
+       spinlock_t prerandom_lock;
 
        /* receiver variables */
+
+       /** @last_recv: last in-order received packet */
        u32 last_recv;
+
+       /** @unacked_list: list of unacked packets (meta-info only) */
        struct list_head unacked_list;
-       spinlock_t unacked_lock; /* Protects unacked_list */
+
+       /** @unacked_lock: protect unacked_list */
+       spinlock_t unacked_lock;
+
+       /** @last_recv_time: time (jiffies) a msg was received */
        unsigned long last_recv_time;
+
+       /** @refcount: number of context where the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_softif_vlan - per VLAN attributes set
- * @bat_priv: pointer to the mesh object
- * @vid: VLAN identifier
- * @kobj: kobject for sysfs vlan subdirectory
- * @ap_isolation: AP isolation state
- * @tt: TT private attributes (VLAN specific)
- * @list: list node for bat_priv::softif_vlan_list
- * @refcount: number of context where this object is currently in use
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_softif_vlan {
+       /** @bat_priv: pointer to the mesh object */
        struct batadv_priv *bat_priv;
+
+       /** @vid: VLAN identifier */
        unsigned short vid;
+
+       /** @kobj: kobject for sysfs vlan subdirectory */
        struct kobject *kobj;
+
+       /** @ap_isolation: AP isolation state */
        atomic_t ap_isolation;          /* boolean */
+
+       /** @tt: TT private attributes (VLAN specific) */
        struct batadv_vlan_tt tt;
+
+       /** @list: list node for &bat_priv.softif_vlan_list */
        struct hlist_node list;
+
+       /**
+        * @refcount: number of context where this object is currently in use
+        */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in a RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_priv_bat_v - B.A.T.M.A.N. V per soft-interface private data
- * @ogm_buff: buffer holding the OGM packet
- * @ogm_buff_len: length of the OGM packet buffer
- * @ogm_seqno: OGM sequence number - used to identify each OGM
- * @ogm_wq: workqueue used to schedule OGM transmissions
  */
 struct batadv_priv_bat_v {
+       /** @ogm_buff: buffer holding the OGM packet */
        unsigned char *ogm_buff;
+
+       /** @ogm_buff_len: length of the OGM packet buffer */
        int ogm_buff_len;
+
+       /** @ogm_seqno: OGM sequence number - used to identify each OGM */
        atomic_t ogm_seqno;
+
+       /** @ogm_wq: workqueue used to schedule OGM transmissions */
        struct delayed_work ogm_wq;
 };
 
 /**
  * struct batadv_priv - per mesh interface data
- * @mesh_state: current status of the mesh (inactive/active/deactivating)
- * @soft_iface: net device which holds this struct as private data
- * @bat_counters: mesh internal traffic statistic counters (see batadv_counters)
- * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
- * @bonding: bool indicating whether traffic bonding is enabled
- * @fragmentation: bool indicating whether traffic fragmentation is enabled
- * @packet_size_max: max packet size that can be transmitted via
- *  multiple fragmented skbs or a single frame if fragmentation is disabled
- * @frag_seqno: incremental counter to identify chains of egress fragments
- * @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is
- *  enabled
- * @distributed_arp_table: bool indicating whether distributed ARP table is
- *  enabled
- * @multicast_mode: Enable or disable multicast optimizations on this node's
- *  sender/originating side
- * @orig_interval: OGM broadcast interval in milliseconds
- * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
- * @log_level: configured log level (see batadv_dbg_level)
- * @isolation_mark: the skb->mark value used to match packets for AP isolation
- * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be used
- *  for the isolation mark
- * @bcast_seqno: last sent broadcast packet sequence number
- * @bcast_queue_left: number of remaining buffered broadcast packet slots
- * @batman_queue_left: number of remaining OGM packet slots
- * @num_ifaces: number of interfaces assigned to this mesh interface
- * @mesh_obj: kobject for sysfs mesh subdirectory
- * @debug_dir: dentry for debugfs batman-adv subdirectory
- * @forw_bat_list: list of aggregated OGMs that will be forwarded
- * @forw_bcast_list: list of broadcast packets that will be rebroadcasted
- * @tp_list: list of tp sessions
- * @tp_num: number of currently active tp sessions
- * @orig_hash: hash table containing mesh participants (orig nodes)
- * @forw_bat_list_lock: lock protecting forw_bat_list
- * @forw_bcast_list_lock: lock protecting forw_bcast_list
- * @tp_list_lock: spinlock protecting @tp_list
- * @orig_work: work queue callback item for orig node purging
- * @primary_if: one of the hard-interfaces assigned to this mesh interface
- *  becomes the primary interface
- * @algo_ops: routing algorithm used by this mesh interface
- * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
- *  of the mesh interface represented by this object
- * @softif_vlan_list_lock: lock protecting softif_vlan_list
- * @bla: bridge loope avoidance data
- * @debug_log: holding debug logging relevant data
- * @gw: gateway data
- * @tt: translation table data
- * @tvlv: type-version-length-value data
- * @dat: distributed arp table data
- * @mcast: multicast data
- * @network_coding: bool indicating whether network coding is enabled
- * @nc: network coding data
- * @bat_v: B.A.T.M.A.N. V per soft-interface private data
  */
 struct batadv_priv {
+       /**
+        * @mesh_state: current status of the mesh
+        *  (inactive/active/deactivating)
+        */
        atomic_t mesh_state;
+
+       /** @soft_iface: net device which holds this struct as private data */
        struct net_device *soft_iface;
+
+       /**
+        * @bat_counters: mesh internal traffic statistic counters (see
+        *  batadv_counters)
+        */
        u64 __percpu *bat_counters; /* Per cpu counters */
+
+       /**
+        * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
+        */
        atomic_t aggregated_ogms;
+
+       /** @bonding: bool indicating whether traffic bonding is enabled */
        atomic_t bonding;
+
+       /**
+        * @fragmentation: bool indicating whether traffic fragmentation is
+        *  enabled
+        */
        atomic_t fragmentation;
+
+       /**
+        * @packet_size_max: max packet size that can be transmitted via
+        *  multiple fragmented skbs or a single frame if fragmentation is
+        *  disabled
+        */
        atomic_t packet_size_max;
+
+       /**
+        * @frag_seqno: incremental counter to identify chains of egress
+        *  fragments
+        */
        atomic_t frag_seqno;
+
 #ifdef CONFIG_BATMAN_ADV_BLA
+       /**
+        * @bridge_loop_avoidance: bool indicating whether bridge loop
+        *  avoidance is enabled
+        */
        atomic_t bridge_loop_avoidance;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+       /**
+        * @distributed_arp_table: bool indicating whether distributed ARP table
+        *  is enabled
+        */
        atomic_t distributed_arp_table;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
+       /**
+        * @multicast_mode: Enable or disable multicast optimizations on this
+        *  node's sender/originating side
+        */
        atomic_t multicast_mode;
 #endif
+
+       /** @orig_interval: OGM broadcast interval in milliseconds */
        atomic_t orig_interval;
+
+       /**
+        * @hop_penalty: penalty which will be applied to an OGM's tq-field on
+        *  every hop
+        */
        atomic_t hop_penalty;
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
+       /** @log_level: configured log level (see batadv_dbg_level) */
        atomic_t log_level;
 #endif
+
+       /**
+        * @isolation_mark: the skb->mark value used to match packets for AP
+        *  isolation
+        */
        u32 isolation_mark;
+
+       /**
+        * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be
+        *  used for the isolation mark
+        */
        u32 isolation_mark_mask;
+
+       /** @bcast_seqno: last sent broadcast packet sequence number */
        atomic_t bcast_seqno;
+
+       /**
+        * @bcast_queue_left: number of remaining buffered broadcast packet
+        *  slots
+        */
        atomic_t bcast_queue_left;
+
+       /** @batman_queue_left: number of remaining OGM packet slots */
        atomic_t batman_queue_left;
+
+       /** @num_ifaces: number of interfaces assigned to this mesh interface */
        char num_ifaces;
+
+       /** @mesh_obj: kobject for sysfs mesh subdirectory */
        struct kobject *mesh_obj;
+
+       /** @debug_dir: dentry for debugfs batman-adv subdirectory */
        struct dentry *debug_dir;
+
+       /** @forw_bat_list: list of aggregated OGMs that will be forwarded */
        struct hlist_head forw_bat_list;
+
+       /**
+        * @forw_bcast_list: list of broadcast packets that will be
+        *  rebroadcasted
+        */
        struct hlist_head forw_bcast_list;
+
+       /** @tp_list: list of tp sessions */
        struct hlist_head tp_list;
+
+       /** @orig_hash: hash table containing mesh participants (orig nodes) */
        struct batadv_hashtable *orig_hash;
-       spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
-       spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
-       spinlock_t tp_list_lock; /* protects tp_list */
+
+       /** @forw_bat_list_lock: lock protecting forw_bat_list */
+       spinlock_t forw_bat_list_lock;
+
+       /** @forw_bcast_list_lock: lock protecting forw_bcast_list */
+       spinlock_t forw_bcast_list_lock;
+
+       /** @tp_list_lock: spinlock protecting @tp_list */
+       spinlock_t tp_list_lock;
+
+       /** @tp_num: number of currently active tp sessions */
        atomic_t tp_num;
+
+       /** @orig_work: work queue callback item for orig node purging */
        struct delayed_work orig_work;
+
+       /**
+        * @primary_if: one of the hard-interfaces assigned to this mesh
+        *  interface becomes the primary interface
+        */
        struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
+
+       /** @algo_ops: routing algorithm used by this mesh interface */
        struct batadv_algo_ops *algo_ops;
+
+       /**
+        * @softif_vlan_list: a list of softif_vlan structs, one per VLAN
+        *  created on top of the mesh interface represented by this object
+        */
        struct hlist_head softif_vlan_list;
-       spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */
+
+       /** @softif_vlan_list_lock: lock protecting softif_vlan_list */
+       spinlock_t softif_vlan_list_lock;
+
 #ifdef CONFIG_BATMAN_ADV_BLA
+       /** @bla: bridge loop avoidance data */
        struct batadv_priv_bla bla;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
+       /** @debug_log: holding debug logging relevant data */
        struct batadv_priv_debug_log *debug_log;
 #endif
+
+       /** @gw: gateway data */
        struct batadv_priv_gw gw;
+
+       /** @tt: translation table data */
        struct batadv_priv_tt tt;
+
+       /** @tvlv: type-version-length-value data */
        struct batadv_priv_tvlv tvlv;
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+       /** @dat: distributed arp table data */
        struct batadv_priv_dat dat;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
+       /** @mcast: multicast data */
        struct batadv_priv_mcast mcast;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_NC
+       /**
+        * @network_coding: bool indicating whether network coding is enabled
+        */
        atomic_t network_coding;
+
+       /** @nc: network coding data */
        struct batadv_priv_nc nc;
 #endif /* CONFIG_BATMAN_ADV_NC */
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+       /** @bat_v: B.A.T.M.A.N. V per soft-interface private data */
        struct batadv_priv_bat_v bat_v;
 #endif
 };
 
 /**
  * struct batadv_socket_client - layer2 icmp socket client data
- * @queue_list: packet queue for packets destined for this socket client
- * @queue_len: number of packets in the packet queue (queue_list)
- * @index: socket client's index in the batadv_socket_client_hash
- * @lock: lock protecting queue_list, queue_len & index
- * @queue_wait: socket client's wait queue
- * @bat_priv: pointer to soft_iface this client belongs to
  */
 struct batadv_socket_client {
+       /**
+        * @queue_list: packet queue for packets destined for this socket client
+        */
        struct list_head queue_list;
+
+       /** @queue_len: number of packets in the packet queue (queue_list) */
        unsigned int queue_len;
+
+       /** @index: socket client's index in the batadv_socket_client_hash */
        unsigned char index;
-       spinlock_t lock; /* protects queue_list, queue_len & index */
+
+       /** @lock: lock protecting queue_list, queue_len & index */
+       spinlock_t lock;
+
+       /** @queue_wait: socket client's wait queue */
        wait_queue_head_t queue_wait;
+
+       /** @bat_priv: pointer to soft_iface this client belongs to */
        struct batadv_priv *bat_priv;
 };
 
 /**
  * struct batadv_socket_packet - layer2 icmp packet for socket client
- * @list: list node for batadv_socket_client::queue_list
- * @icmp_len: size of the layer2 icmp packet
- * @icmp_packet: layer2 icmp packet
  */
 struct batadv_socket_packet {
+       /** @list: list node for &batadv_socket_client.queue_list */
        struct list_head list;
+
+       /** @icmp_len: size of the layer2 icmp packet */
        size_t icmp_len;
+
+       /** @icmp_packet: layer2 icmp packet */
        u8 icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
 };
 
@@ -1153,312 +1740,432 @@ struct batadv_socket_packet {
 
 /**
  * struct batadv_bla_backbone_gw - batman-adv gateway bridged into the LAN
- * @orig: originator address of backbone node (mac address of primary iface)
- * @vid: vlan id this gateway was detected on
- * @hash_entry: hlist node for batadv_priv_bla::backbone_hash
- * @bat_priv: pointer to soft_iface this backbone gateway belongs to
- * @lasttime: last time we heard of this backbone gw
- * @wait_periods: grace time for bridge forward delays and bla group forming at
- *  bootup phase - no bcast traffic is formwared until it has elapsed
- * @request_sent: if this bool is set to true we are out of sync with this
- *  backbone gateway - no bcast traffic is formwared until the situation was
- *  resolved
- * @crc: crc16 checksum over all claims
- * @crc_lock: lock protecting crc
- * @report_work: work struct for reporting detected loops
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_bla_backbone_gw {
+       /**
+        * @orig: originator address of backbone node (mac address of primary
+        *  iface)
+        */
        u8 orig[ETH_ALEN];
+
+       /** @vid: vlan id this gateway was detected on */
        unsigned short vid;
+
+       /** @hash_entry: hlist node for &batadv_priv_bla.backbone_hash */
        struct hlist_node hash_entry;
+
+       /** @bat_priv: pointer to soft_iface this backbone gateway belongs to */
        struct batadv_priv *bat_priv;
+
+       /** @lasttime: last time we heard of this backbone gw */
        unsigned long lasttime;
+
+       /**
+        * @wait_periods: grace time for bridge forward delays and bla group
+        *  forming at bootup phase - no bcast traffic is forwarded until it has
+        *  elapsed
+        */
        atomic_t wait_periods;
+
+       /**
+        * @request_sent: if this bool is set to true we are out of sync with
+        *  this backbone gateway - no bcast traffic is forwarded until the
+        *  situation was resolved
+        */
        atomic_t request_sent;
+
+       /** @crc: crc16 checksum over all claims */
        u16 crc;
-       spinlock_t crc_lock; /* protects crc */
+
+       /** @crc_lock: lock protecting crc */
+       spinlock_t crc_lock;
+
+       /** @report_work: work struct for reporting detected loops */
        struct work_struct report_work;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_bla_claim - claimed non-mesh client structure
- * @addr: mac address of claimed non-mesh client
- * @vid: vlan id this client was detected on
- * @backbone_gw: pointer to backbone gw claiming this client
- * @backbone_lock: lock protecting backbone_gw pointer
- * @lasttime: last time we heard of claim (locals only)
- * @hash_entry: hlist node for batadv_priv_bla::claim_hash
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_bla_claim {
+       /** @addr: mac address of claimed non-mesh client */
        u8 addr[ETH_ALEN];
+
+       /** @vid: vlan id this client was detected on */
        unsigned short vid;
+
+       /** @backbone_gw: pointer to backbone gw claiming this client */
        struct batadv_bla_backbone_gw *backbone_gw;
-       spinlock_t backbone_lock; /* protects backbone_gw */
+
+       /** @backbone_lock: lock protecting backbone_gw pointer */
+       spinlock_t backbone_lock;
+
+       /** @lasttime: last time we heard of claim (locals only) */
        unsigned long lasttime;
+
+       /** @hash_entry: hlist node for &batadv_priv_bla.claim_hash */
        struct hlist_node hash_entry;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
 };
 #endif
 
 /**
  * struct batadv_tt_common_entry - tt local & tt global common data
- * @addr: mac address of non-mesh client
- * @vid: VLAN identifier
- * @hash_entry: hlist node for batadv_priv_tt::local_hash or for
- *  batadv_priv_tt::global_hash
- * @flags: various state handling flags (see batadv_tt_client_flags)
- * @added_at: timestamp used for purging stale tt common entries
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tt_common_entry {
+       /** @addr: mac address of non-mesh client */
        u8 addr[ETH_ALEN];
+
+       /** @vid: VLAN identifier */
        unsigned short vid;
+
+       /**
+        * @hash_entry: hlist node for &batadv_priv_tt.local_hash or for
+        *  &batadv_priv_tt.global_hash
+        */
        struct hlist_node hash_entry;
+
+       /** @flags: various state handling flags (see batadv_tt_client_flags) */
        u16 flags;
+
+       /** @added_at: timestamp used for purging stale tt common entries */
        unsigned long added_at;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_tt_local_entry - translation table local entry data
- * @common: general translation table data
- * @last_seen: timestamp used for purging stale tt local entries
- * @vlan: soft-interface vlan of the entry
  */
 struct batadv_tt_local_entry {
+       /** @common: general translation table data */
        struct batadv_tt_common_entry common;
+
+       /** @last_seen: timestamp used for purging stale tt local entries */
        unsigned long last_seen;
+
+       /** @vlan: soft-interface vlan of the entry */
        struct batadv_softif_vlan *vlan;
 };
 
 /**
  * struct batadv_tt_global_entry - translation table global entry data
- * @common: general translation table data
- * @orig_list: list of orig nodes announcing this non-mesh client
- * @orig_list_count: number of items in the orig_list
- * @list_lock: lock protecting orig_list
- * @roam_at: time at which TT_GLOBAL_ROAM was set
  */
 struct batadv_tt_global_entry {
+       /** @common: general translation table data */
        struct batadv_tt_common_entry common;
+
+       /** @orig_list: list of orig nodes announcing this non-mesh client */
        struct hlist_head orig_list;
+
+       /** @orig_list_count: number of items in the orig_list */
        atomic_t orig_list_count;
-       spinlock_t list_lock;   /* protects orig_list */
+
+       /** @list_lock: lock protecting orig_list */
+       spinlock_t list_lock;
+
+       /** @roam_at: time at which TT_GLOBAL_ROAM was set */
        unsigned long roam_at;
 };
 
 /**
  * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
- * @orig_node: pointer to orig node announcing this non-mesh client
- * @ttvn: translation table version number which added the non-mesh client
- * @flags: per orig entry TT sync flags
- * @list: list node for batadv_tt_global_entry::orig_list
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tt_orig_list_entry {
+       /** @orig_node: pointer to orig node announcing this non-mesh client */
        struct batadv_orig_node *orig_node;
+
+       /**
+        * @ttvn: translation table version number which added the non-mesh
+        *  client
+        */
        u8 ttvn;
+
+       /** @flags: per orig entry TT sync flags */
        u8 flags;
+
+       /** @list: list node for &batadv_tt_global_entry.orig_list */
        struct hlist_node list;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_tt_change_node - structure for tt changes occurred
- * @list: list node for batadv_priv_tt::changes_list
- * @change: holds the actual translation table diff data
  */
 struct batadv_tt_change_node {
+       /** @list: list node for &batadv_priv_tt.changes_list */
        struct list_head list;
+
+       /** @change: holds the actual translation table diff data */
        struct batadv_tvlv_tt_change change;
 };
 
 /**
  * struct batadv_tt_req_node - data to keep track of the tt requests in flight
- * @addr: mac address address of the originator this request was sent to
- * @issued_at: timestamp used for purging stale tt requests
- * @refcount: number of contexts the object is used by
- * @list: list node for batadv_priv_tt::req_list
  */
 struct batadv_tt_req_node {
+       /**
+        * @addr: mac address of the originator this request was sent to
+        */
        u8 addr[ETH_ALEN];
+
+       /** @issued_at: timestamp used for purging stale tt requests */
        unsigned long issued_at;
+
+       /** @refcount: number of contexts the object is used by */
        struct kref refcount;
+
+       /** @list: list node for &batadv_priv_tt.req_list */
        struct hlist_node list;
 };
 
 /**
  * struct batadv_tt_roam_node - roaming client data
- * @addr: mac address of the client in the roaming phase
- * @counter: number of allowed roaming events per client within a single
- *  OGM interval (changes are committed with each OGM)
- * @first_time: timestamp used for purging stale roaming node entries
- * @list: list node for batadv_priv_tt::roam_list
  */
 struct batadv_tt_roam_node {
+       /** @addr: mac address of the client in the roaming phase */
        u8 addr[ETH_ALEN];
+
+       /**
+        * @counter: number of allowed roaming events per client within a single
+        * OGM interval (changes are committed with each OGM)
+        */
        atomic_t counter;
+
+       /**
+        * @first_time: timestamp used for purging stale roaming node entries
+        */
        unsigned long first_time;
+
+       /** @list: list node for &batadv_priv_tt.roam_list */
        struct list_head list;
 };
 
 /**
  * struct batadv_nc_node - network coding node
- * @list: next and prev pointer for the list handling
- * @addr: the node's mac address
- * @refcount: number of contexts the object is used by
- * @rcu: struct used for freeing in an RCU-safe manner
- * @orig_node: pointer to corresponding orig node struct
- * @last_seen: timestamp of last ogm received from this node
  */
 struct batadv_nc_node {
+       /** @list: next and prev pointer for the list handling */
        struct list_head list;
+
+       /** @addr: the node's mac address */
        u8 addr[ETH_ALEN];
+
+       /** @refcount: number of contexts the object is used by */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
+
+       /** @orig_node: pointer to corresponding orig node struct */
        struct batadv_orig_node *orig_node;
+
+       /** @last_seen: timestamp of last ogm received from this node */
        unsigned long last_seen;
 };
 
 /**
  * struct batadv_nc_path - network coding path
- * @hash_entry: next and prev pointer for the list handling
- * @rcu: struct used for freeing in an RCU-safe manner
- * @refcount: number of contexts the object is used by
- * @packet_list: list of buffered packets for this path
- * @packet_list_lock: access lock for packet list
- * @next_hop: next hop (destination) of path
- * @prev_hop: previous hop (source) of path
- * @last_valid: timestamp for last validation of path
  */
 struct batadv_nc_path {
+       /** @hash_entry: next and prev pointer for the list handling */
        struct hlist_node hash_entry;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
+
+       /** @refcount: number of contexts the object is used by */
        struct kref refcount;
+
+       /** @packet_list: list of buffered packets for this path */
        struct list_head packet_list;
-       spinlock_t packet_list_lock; /* Protects packet_list */
+
+       /** @packet_list_lock: access lock for packet list */
+       spinlock_t packet_list_lock;
+
+       /** @next_hop: next hop (destination) of path */
        u8 next_hop[ETH_ALEN];
+
+       /** @prev_hop: previous hop (source) of path */
        u8 prev_hop[ETH_ALEN];
+
+       /** @last_valid: timestamp for last validation of path */
        unsigned long last_valid;
 };
 
 /**
  * struct batadv_nc_packet - network coding packet used when coding and
  *  decoding packets
- * @list: next and prev pointer for the list handling
- * @packet_id: crc32 checksum of skb data
- * @timestamp: field containing the info when the packet was added to path
- * @neigh_node: pointer to original next hop neighbor of skb
- * @skb: skb which can be encoded or used for decoding
- * @nc_path: pointer to path this nc packet is attached to
  */
 struct batadv_nc_packet {
+       /** @list: next and prev pointer for the list handling */
        struct list_head list;
+
+       /** @packet_id: crc32 checksum of skb data */
        __be32 packet_id;
+
+       /**
+        * @timestamp: field containing the info when the packet was added to
+        *  path
+        */
        unsigned long timestamp;
+
+       /** @neigh_node: pointer to original next hop neighbor of skb */
        struct batadv_neigh_node *neigh_node;
+
+       /** @skb: skb which can be encoded or used for decoding */
        struct sk_buff *skb;
+
+       /** @nc_path: pointer to path this nc packet is attached to */
        struct batadv_nc_path *nc_path;
 };
 
 /**
  * struct batadv_skb_cb - control buffer structure used to store private data
  *  relevant to batman-adv in the skb->cb buffer in skbs.
- * @decoded: Marks a skb as decoded, which is checked when searching for coding
- *  opportunities in network-coding.c
- * @num_bcasts: Counter for broadcast packet retransmissions
  */
 struct batadv_skb_cb {
+       /**
+        * @decoded: Marks a skb as decoded, which is checked when searching for
+        *  coding opportunities in network-coding.c
+        */
        bool decoded;
+
+       /** @num_bcasts: Counter for broadcast packet retransmissions */
        unsigned int num_bcasts;
 };
 
 /**
  * struct batadv_forw_packet - structure for bcast packets to be sent/forwarded
- * @list: list node for batadv_priv::forw_{bat,bcast}_list
- * @cleanup_list: list node for purging functions
- * @send_time: execution time for delayed_work (packet sending)
- * @own: bool for locally generated packets (local OGMs are re-scheduled after
- *  sending)
- * @skb: bcast packet's skb buffer
- * @packet_len: size of aggregated OGM packet inside the skb buffer
- * @direct_link_flags: direct link flags for aggregated OGM packets
- * @num_packets: counter for aggregated OGMv1 packets
- * @delayed_work: work queue callback item for packet sending
- * @if_incoming: pointer to incoming hard-iface or primary iface if
- *  locally generated packet
- * @if_outgoing: packet where the packet should be sent to, or NULL if
- *  unspecified
- * @queue_left: The queue (counter) this packet was applied to
 */
 struct batadv_forw_packet {
+       /**
+        * @list: list node for &batadv_priv.forw_bcast_list and
+        *  &batadv_priv.forw_bat_list
+        */
        struct hlist_node list;
+
+       /** @cleanup_list: list node for purging functions */
        struct hlist_node cleanup_list;
+
+       /** @send_time: execution time for delayed_work (packet sending) */
        unsigned long send_time;
+
+       /**
+        * @own: bool for locally generated packets (local OGMs are re-scheduled
+        * after sending)
+        */
        u8 own;
+
+       /** @skb: bcast packet's skb buffer */
        struct sk_buff *skb;
+
+       /** @packet_len: size of aggregated OGM packet inside the skb buffer */
        u16 packet_len;
+
+       /** @direct_link_flags: direct link flags for aggregated OGM packets */
        u32 direct_link_flags;
+
+       /** @num_packets: counter for aggregated OGMv1 packets */
        u8 num_packets;
+
+       /** @delayed_work: work queue callback item for packet sending */
        struct delayed_work delayed_work;
+
+       /**
+        * @if_incoming: pointer to incoming hard-iface or primary iface if
+        *  locally generated packet
+        */
        struct batadv_hard_iface *if_incoming;
+
+       /**
+        * @if_outgoing: packet where the packet should be sent to, or NULL if
+        *  unspecified
+        */
        struct batadv_hard_iface *if_outgoing;
+
+       /** @queue_left: The queue (counter) this packet was applied to */
        atomic_t *queue_left;
 };
 
 /**
  * struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific)
- * @activate: start routing mechanisms when hard-interface is brought up
- *  (optional)
- * @enable: init routing info when hard-interface is enabled
- * @disable: de-init routing info when hard-interface is disabled
- * @update_mac: (re-)init mac addresses of the protocol information
- *  belonging to this hard-interface
- * @primary_set: called when primary interface is selected / changed
  */
 struct batadv_algo_iface_ops {
+       /**
+        * @activate: start routing mechanisms when hard-interface is brought up
+        *  (optional)
+        */
        void (*activate)(struct batadv_hard_iface *hard_iface);
+
+       /** @enable: init routing info when hard-interface is enabled */
        int (*enable)(struct batadv_hard_iface *hard_iface);
+
+       /** @disable: de-init routing info when hard-interface is disabled */
        void (*disable)(struct batadv_hard_iface *hard_iface);
+
+       /**
+        * @update_mac: (re-)init mac addresses of the protocol information
+        *  belonging to this hard-interface
+        */
        void (*update_mac)(struct batadv_hard_iface *hard_iface);
+
+       /** @primary_set: called when primary interface is selected / changed */
        void (*primary_set)(struct batadv_hard_iface *hard_iface);
 };
 
 /**
  * struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific)
- * @hardif_init: called on creation of single hop entry
- *  (optional)
- * @cmp: compare the metrics of two neighbors for their respective outgoing
- *  interfaces
- * @is_similar_or_better: check if neigh1 is equally similar or better than
- *  neigh2 for their respective outgoing interface from the metric prospective
- * @print: print the single hop neighbor list (optional)
- * @dump: dump neighbors to a netlink socket (optional)
  */
 struct batadv_algo_neigh_ops {
+       /** @hardif_init: called on creation of single hop entry (optional) */
        void (*hardif_init)(struct batadv_hardif_neigh_node *neigh);
+
+       /**
+        * @cmp: compare the metrics of two neighbors for their respective
+        *  outgoing interfaces
+        */
        int (*cmp)(struct batadv_neigh_node *neigh1,
                   struct batadv_hard_iface *if_outgoing1,
                   struct batadv_neigh_node *neigh2,
                   struct batadv_hard_iface *if_outgoing2);
+
+       /**
+        * @is_similar_or_better: check if neigh1 is equally similar or better
+        *  than neigh2 for their respective outgoing interface from the metric
+        *  perspective
+        */
        bool (*is_similar_or_better)(struct batadv_neigh_node *neigh1,
                                     struct batadv_hard_iface *if_outgoing1,
                                     struct batadv_neigh_node *neigh2,
                                     struct batadv_hard_iface *if_outgoing2);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+       /** @print: print the single hop neighbor list (optional) */
        void (*print)(struct batadv_priv *priv, struct seq_file *seq);
 #endif
+
+       /** @dump: dump neighbors to a netlink socket (optional) */
        void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
                     struct batadv_priv *priv,
                     struct batadv_hard_iface *hard_iface);
@@ -1466,24 +2173,36 @@ struct batadv_algo_neigh_ops {
 
 /**
  * struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific)
- * @free: free the resources allocated by the routing algorithm for an orig_node
- *  object (optional)
- * @add_if: ask the routing algorithm to apply the needed changes to the
- *  orig_node due to a new hard-interface being added into the mesh (optional)
- * @del_if: ask the routing algorithm to apply the needed changes to the
- *  orig_node due to an hard-interface being removed from the mesh (optional)
- * @print: print the originator table (optional)
- * @dump: dump originators to a netlink socket (optional)
  */
 struct batadv_algo_orig_ops {
+       /**
+        * @free: free the resources allocated by the routing algorithm for an
+        *  orig_node object (optional)
+        */
        void (*free)(struct batadv_orig_node *orig_node);
+
+       /**
+        * @add_if: ask the routing algorithm to apply the needed changes to the
+        *  orig_node due to a new hard-interface being added into the mesh
+        *  (optional)
+        */
        int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
+
+       /**
+        * @del_if: ask the routing algorithm to apply the needed changes to the
+        *  orig_node due to an hard-interface being removed from the mesh
+        *  (optional)
+        */
        int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
                      int del_if_num);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+       /** @print: print the originator table (optional) */
        void (*print)(struct batadv_priv *priv, struct seq_file *seq,
                      struct batadv_hard_iface *hard_iface);
 #endif
+
+       /** @dump: dump originators to a netlink socket (optional) */
        void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
                     struct batadv_priv *priv,
                     struct batadv_hard_iface *hard_iface);
@@ -1491,158 +2210,213 @@ struct batadv_algo_orig_ops {
 
 /**
  * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
- * @init_sel_class: initialize GW selection class (optional)
- * @store_sel_class: parse and stores a new GW selection class (optional)
- * @show_sel_class: prints the current GW selection class (optional)
- * @get_best_gw_node: select the best GW from the list of available nodes
- *  (optional)
- * @is_eligible: check if a newly discovered GW is a potential candidate for
- *  the election as best GW (optional)
- * @print: print the gateway table (optional)
- * @dump: dump gateways to a netlink socket (optional)
  */
 struct batadv_algo_gw_ops {
+       /** @init_sel_class: initialize GW selection class (optional) */
        void (*init_sel_class)(struct batadv_priv *bat_priv);
+
+       /**
+        * @store_sel_class: parse and stores a new GW selection class
+        *  (optional)
+        */
        ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
                                   size_t count);
+
+       /** @show_sel_class: prints the current GW selection class (optional) */
        ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
+
+       /**
+        * @get_best_gw_node: select the best GW from the list of available
+        *  nodes (optional)
+        */
        struct batadv_gw_node *(*get_best_gw_node)
                (struct batadv_priv *bat_priv);
+
+       /**
+        * @is_eligible: check if a newly discovered GW is a potential candidate
+        *  for the election as best GW (optional)
+        */
        bool (*is_eligible)(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *curr_gw_orig,
                            struct batadv_orig_node *orig_node);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+       /** @print: print the gateway table (optional) */
        void (*print)(struct batadv_priv *bat_priv, struct seq_file *seq);
 #endif
+
+       /** @dump: dump gateways to a netlink socket (optional) */
        void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
                     struct batadv_priv *priv);
 };
 
 /**
  * struct batadv_algo_ops - mesh algorithm callbacks
- * @list: list node for the batadv_algo_list
- * @name: name of the algorithm
- * @iface: callbacks related to interface handling
- * @neigh: callbacks related to neighbors handling
- * @orig: callbacks related to originators handling
- * @gw: callbacks related to GW mode
  */
 struct batadv_algo_ops {
+       /** @list: list node for the batadv_algo_list */
        struct hlist_node list;
+
+       /** @name: name of the algorithm */
        char *name;
+
+       /** @iface: callbacks related to interface handling */
        struct batadv_algo_iface_ops iface;
+
+       /** @neigh: callbacks related to neighbors handling */
        struct batadv_algo_neigh_ops neigh;
+
+       /** @orig: callbacks related to originators handling */
        struct batadv_algo_orig_ops orig;
+
+       /** @gw: callbacks related to GW mode */
        struct batadv_algo_gw_ops gw;
 };
 
 /**
  * struct batadv_dat_entry - it is a single entry of batman-adv ARP backend. It
  * is used to stored ARP entries needed for the global DAT cache
- * @ip: the IPv4 corresponding to this DAT/ARP entry
- * @mac_addr: the MAC address associated to the stored IPv4
- * @vid: the vlan ID associated to this entry
- * @last_update: time in jiffies when this entry was refreshed last time
- * @hash_entry: hlist node for batadv_priv_dat::hash
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_dat_entry {
+       /** @ip: the IPv4 corresponding to this DAT/ARP entry */
        __be32 ip;
+
+       /** @mac_addr: the MAC address associated to the stored IPv4 */
        u8 mac_addr[ETH_ALEN];
+
+       /** @vid: the vlan ID associated to this entry */
        unsigned short vid;
+
+       /**
+        * @last_update: time in jiffies when this entry was refreshed last time
+        */
        unsigned long last_update;
+
+       /** @hash_entry: hlist node for &batadv_priv_dat.hash */
        struct hlist_node hash_entry;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * struct batadv_hw_addr - a list entry for a MAC address
- * @list: list node for the linking of entries
- * @addr: the MAC address of this list entry
  */
 struct batadv_hw_addr {
+       /** @list: list node for the linking of entries */
        struct hlist_node list;
+
+       /** @addr: the MAC address of this list entry */
        unsigned char addr[ETH_ALEN];
 };
 
 /**
  * struct batadv_dat_candidate - candidate destination for DAT operations
- * @type: the type of the selected candidate. It can one of the following:
- *       - BATADV_DAT_CANDIDATE_NOT_FOUND
- *       - BATADV_DAT_CANDIDATE_ORIG
- * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to the
- *            corresponding originator node structure
 */
 struct batadv_dat_candidate {
+       /**
+        * @type: the type of the selected candidate. It can be one of the
+        *  following:
+        *        - BATADV_DAT_CANDIDATE_NOT_FOUND
+        *        - BATADV_DAT_CANDIDATE_ORIG
+        */
        int type;
+
+       /**
+        * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to
+        * the corresponding originator node structure
+        */
        struct batadv_orig_node *orig_node;
 };
 
 /**
  * struct batadv_tvlv_container - container for tvlv appended to OGMs
- * @list: hlist node for batadv_priv_tvlv::container_list
- * @tvlv_hdr: tvlv header information needed to construct the tvlv
- * @refcount: number of contexts the object is used
  */
 struct batadv_tvlv_container {
+       /** @list: hlist node for &batadv_priv_tvlv.container_list */
        struct hlist_node list;
+
+       /** @tvlv_hdr: tvlv header information needed to construct the tvlv */
        struct batadv_tvlv_hdr tvlv_hdr;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
 };
 
 /**
  * struct batadv_tvlv_handler - handler for specific tvlv type and version
- * @list: hlist node for batadv_priv_tvlv::handler_list
- * @ogm_handler: handler callback which is given the tvlv payload to process on
- *  incoming OGM packets
- * @unicast_handler: handler callback which is given the tvlv payload to process
- *  on incoming unicast tvlv packets
- * @type: tvlv type this handler feels responsible for
- * @version: tvlv version this handler feels responsible for
- * @flags: tvlv handler flags
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tvlv_handler {
+       /** @list: hlist node for &batadv_priv_tvlv.handler_list */
        struct hlist_node list;
+
+       /**
+        * @ogm_handler: handler callback which is given the tvlv payload to
+        *  process on incoming OGM packets
+        */
        void (*ogm_handler)(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *orig,
                            u8 flags, void *tvlv_value, u16 tvlv_value_len);
+
+       /**
+        * @unicast_handler: handler callback which is given the tvlv payload to
+        *  process on incoming unicast tvlv packets
+        */
        int (*unicast_handler)(struct batadv_priv *bat_priv,
                               u8 *src, u8 *dst,
                               void *tvlv_value, u16 tvlv_value_len);
+
+       /** @type: tvlv type this handler feels responsible for */
        u8 type;
+
+       /** @version: tvlv version this handler feels responsible for */
        u8 version;
+
+       /** @flags: tvlv handler flags */
        u8 flags;
+
+       /** @refcount: number of contexts the object is used */
        struct kref refcount;
+
+       /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 };
 
 /**
  * enum batadv_tvlv_handler_flags - tvlv handler flags definitions
- * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function will call
- *  this handler even if its type was not found (with no data)
- * @BATADV_TVLV_HANDLER_OGM_CALLED: interval tvlv handling flag - the API marks
- *  a handler as being called, so it won't be called if the
- *  BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
  */
 enum batadv_tvlv_handler_flags {
+       /**
+        * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function
+        *  will call this handler even if its type was not found (with no data)
+        */
        BATADV_TVLV_HANDLER_OGM_CIFNOTFND = BIT(1),
+
+       /**
+        * @BATADV_TVLV_HANDLER_OGM_CALLED: interval tvlv handling flag - the
+        *  API marks a handler as being called, so it won't be called if the
+        *  BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
+        */
        BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
 };
 
 /**
  * struct batadv_store_mesh_work - Work queue item to detach add/del interface
  *  from sysfs locks
- * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
- * @soft_iface_name: name of soft-interface to modify
- * @work: work queue item
  */
 struct batadv_store_mesh_work {
+       /**
+        * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
+        */
        struct net_device *net_dev;
+
+       /** @soft_iface_name: name of soft-interface to modify */
        char soft_iface_name[IFNAMSIZ];
+
+       /** @work: work queue item */
        struct work_struct work;
 };
 
index d0ef0a8..015f465 100644 (file)
@@ -1262,19 +1262,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
        struct net_bridge *br = netdev_priv(dev);
        int err;
 
+       err = register_netdevice(dev);
+       if (err)
+               return err;
+
        if (tb[IFLA_ADDRESS]) {
                spin_lock_bh(&br->lock);
                br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
                spin_unlock_bh(&br->lock);
        }
 
-       err = register_netdevice(dev);
-       if (err)
-               return err;
-
        err = br_changelink(dev, tb, data, extack);
        if (err)
-               unregister_netdevice(dev);
+               br_dev_delete(dev, NULL);
+
        return err;
 }
 
index 723f25e..b1be0dc 100644 (file)
@@ -272,10 +272,7 @@ static ssize_t group_addr_show(struct device *d,
                               struct device_attribute *attr, char *buf)
 {
        struct net_bridge *br = to_bridge(d);
-       return sprintf(buf, "%x:%x:%x:%x:%x:%x\n",
-                      br->group_addr[0], br->group_addr[1],
-                      br->group_addr[2], br->group_addr[3],
-                      br->group_addr[4], br->group_addr[5]);
+       return sprintf(buf, "%pM\n", br->group_addr);
 }
 
 static ssize_t group_addr_store(struct device *d,
@@ -284,14 +281,11 @@ static ssize_t group_addr_store(struct device *d,
 {
        struct net_bridge *br = to_bridge(d);
        u8 new_addr[6];
-       int i;
 
        if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
-       if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-                  &new_addr[0], &new_addr[1], &new_addr[2],
-                  &new_addr[3], &new_addr[4], &new_addr[5]) != 6)
+       if (!mac_pton(buf, new_addr))
                return -EINVAL;
 
        if (!is_link_local_ether_addr(new_addr))
@@ -306,8 +300,7 @@ static ssize_t group_addr_store(struct device *d,
                return restart_syscall();
 
        spin_lock_bh(&br->lock);
-       for (i = 0; i < 6; i++)
-               br->group_addr[i] = new_addr[i];
+       ether_addr_copy(br->group_addr, new_addr);
        spin_unlock_bh(&br->lock);
 
        br->group_addr_set = true;
index 1fd0a9c..6dbbba8 100644 (file)
@@ -11,7 +11,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 obj-y               += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
                        neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
                        sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
-                       fib_notifier.o
+                       fib_notifier.o xdp.o
 
 obj-y += net-sysfs.o
 obj-$(CONFIG_PROC_FS) += net-procfs.o
index c7db399..d7925ef 100644 (file)
@@ -3059,7 +3059,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_csum_hwoffload_help);
 
-static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
        netdev_features_t features;
 
@@ -3083,9 +3083,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
                    __skb_linearize(skb))
                        goto out_kfree_skb;
 
-               if (validate_xmit_xfrm(skb, features))
-                       goto out_kfree_skb;
-
                /* If packet is not checksummed and device does not
                 * support checksumming for this protocol, complete
                 * checksumming here.
@@ -3102,6 +3099,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
                }
        }
 
+       skb = validate_xmit_xfrm(skb, features, again);
+
        return skb;
 
 out_kfree_skb:
@@ -3111,7 +3110,7 @@ out_null:
        return NULL;
 }
 
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
        struct sk_buff *next, *head = NULL, *tail;
 
@@ -3122,7 +3121,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
                /* in case skb wont be segmented, point to itself */
                skb->prev = skb;
 
-               skb = validate_xmit_skb(skb, dev);
+               skb = validate_xmit_skb(skb, dev, again);
                if (!skb)
                        continue;
 
@@ -3449,6 +3448,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
        struct netdev_queue *txq;
        struct Qdisc *q;
        int rc = -ENOMEM;
+       bool again = false;
 
        skb_reset_mac_header(skb);
 
@@ -3510,7 +3510,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
                                     XMIT_RECURSION_LIMIT))
                                goto recursion_alert;
 
-                       skb = validate_xmit_skb(skb, dev);
+                       skb = validate_xmit_skb(skb, dev, &again);
                        if (!skb)
                                goto out;
 
@@ -3906,9 +3906,33 @@ drop:
        return NET_RX_DROP;
 }
 
+static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       struct netdev_rx_queue *rxqueue;
+
+       rxqueue = dev->_rx;
+
+       if (skb_rx_queue_recorded(skb)) {
+               u16 index = skb_get_rx_queue(skb);
+
+               if (unlikely(index >= dev->real_num_rx_queues)) {
+                       WARN_ONCE(dev->real_num_rx_queues > 1,
+                                 "%s received packet on queue %u, but number "
+                                 "of RX queues is %u\n",
+                                 dev->name, index, dev->real_num_rx_queues);
+
+                       return rxqueue; /* Return first rxqueue */
+               }
+               rxqueue += index;
+       }
+       return rxqueue;
+}
+
 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
                                     struct bpf_prog *xdp_prog)
 {
+       struct netdev_rx_queue *rxqueue;
        u32 metalen, act = XDP_DROP;
        struct xdp_buff xdp;
        void *orig_data;
@@ -3937,7 +3961,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
                                     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
                                     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
                        goto do_drop;
-               if (troom > 0 && __skb_linearize(skb))
+               if (skb_linearize(skb))
                        goto do_drop;
        }
 
@@ -3952,6 +3976,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
        xdp.data_hard_start = skb->data - skb_headroom(skb);
        orig_data = xdp.data;
 
+       rxqueue = netif_get_rxqueue(skb);
+       xdp.rxq = &rxqueue->xdp_rxq;
+
        act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
        off = xdp.data - orig_data;
@@ -4194,6 +4221,8 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
                                spin_unlock(root_lock);
                }
        }
+
+       xfrm_dev_backlog(sd);
 }
 
 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
@@ -7587,12 +7616,12 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 }
 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
 
-#ifdef CONFIG_SYSFS
 static int netif_alloc_rx_queues(struct net_device *dev)
 {
        unsigned int i, count = dev->num_rx_queues;
        struct netdev_rx_queue *rx;
        size_t sz = count * sizeof(*rx);
+       int err = 0;
 
        BUG_ON(count < 1);
 
@@ -7602,11 +7631,39 @@ static int netif_alloc_rx_queues(struct net_device *dev)
 
        dev->_rx = rx;
 
-       for (i = 0; i < count; i++)
+       for (i = 0; i < count; i++) {
                rx[i].dev = dev;
+
+               /* XDP RX-queue setup */
+               err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
+               if (err < 0)
+                       goto err_rxq_info;
+       }
        return 0;
+
+err_rxq_info:
+       /* Rollback successful reg's and free other resources */
+       while (i--)
+               xdp_rxq_info_unreg(&rx[i].xdp_rxq);
+       kfree(dev->_rx);
+       dev->_rx = NULL;
+       return err;
+}
+
+static void netif_free_rx_queues(struct net_device *dev)
+{
+       unsigned int i, count = dev->num_rx_queues;
+       struct netdev_rx_queue *rx;
+
+       /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
+       if (!dev->_rx)
+               return;
+
+       rx = dev->_rx;
+
+       for (i = 0; i < count; i++)
+               xdp_rxq_info_unreg(&rx[i].xdp_rxq);
 }
-#endif
 
 static void netdev_init_one_queue(struct net_device *dev,
                                  struct netdev_queue *queue, void *_unused)
@@ -8167,12 +8224,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                return NULL;
        }
 
-#ifdef CONFIG_SYSFS
        if (rxqs < 1) {
                pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
                return NULL;
        }
-#endif
 
        alloc_size = sizeof(struct net_device);
        if (sizeof_priv) {
@@ -8229,12 +8284,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
        if (netif_alloc_netdev_queues(dev))
                goto free_all;
 
-#ifdef CONFIG_SYSFS
        dev->num_rx_queues = rxqs;
        dev->real_num_rx_queues = rxqs;
        if (netif_alloc_rx_queues(dev))
                goto free_all;
-#endif
 
        strcpy(dev->name, name);
        dev->name_assign_type = name_assign_type;
@@ -8273,9 +8326,7 @@ void free_netdev(struct net_device *dev)
 
        might_sleep();
        netif_free_tx_queues(dev);
-#ifdef CONFIG_SYSFS
-       kvfree(dev->_rx);
-#endif
+       netif_free_rx_queues(dev);
 
        kfree(rcu_dereference_protected(dev->ingress_queue, 1));
 
@@ -8875,6 +8926,9 @@ static int __init net_dev_init(void)
 
                skb_queue_head_init(&sd->input_pkt_queue);
                skb_queue_head_init(&sd->process_queue);
+#ifdef CONFIG_XFRM_OFFLOAD
+               skb_queue_head_init(&sd->xfrm_backlog);
+#endif
                INIT_LIST_HEAD(&sd->poll_list);
                sd->output_queue_tailp = &sd->output_queue;
 #ifdef CONFIG_RPS
index 754abe1..acdb94c 100644 (file)
@@ -2684,8 +2684,9 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
        return 0;
 }
 
-int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb,
-                               struct bpf_prog *xdp_prog)
+static int xdp_do_generic_redirect_map(struct net_device *dev,
+                                      struct sk_buff *skb,
+                                      struct bpf_prog *xdp_prog)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
        unsigned long map_owner = ri->map_owner;
@@ -4303,6 +4304,25 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
                                      si->dst_reg, si->src_reg,
                                      offsetof(struct xdp_buff, data_end));
                break;
+       case offsetof(struct xdp_md, ingress_ifindex):
+               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
+                                     si->dst_reg, si->src_reg,
+                                     offsetof(struct xdp_buff, rxq));
+               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
+                                     si->dst_reg, si->dst_reg,
+                                     offsetof(struct xdp_rxq_info, dev));
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+                                     bpf_target_off(struct net_device,
+                                                    ifindex, 4, target_size));
+               break;
+       case offsetof(struct xdp_md, rx_queue_index):
+               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
+                                     si->dst_reg, si->src_reg,
+                                     offsetof(struct xdp_buff, rxq));
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+                                     bpf_target_off(struct xdp_rxq_info,
+                                               queue_index, 4, target_size));
+               break;
        }
 
        return insn - insn_buf;
index cc75488..02db7b1 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/tcp.h>
 #include <net/flow_dissector.h>
 #include <scsi/fc/fc_fcoe.h>
+#include <uapi/linux/batadv_packet.h>
 
 static void dissector_set_key(struct flow_dissector *flow_dissector,
                              enum flow_dissector_key_id key_id)
@@ -437,6 +438,57 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
        return FLOW_DISSECT_RET_PROTO_AGAIN;
 }
 
+/**
+ * __skb_flow_dissect_batadv() - dissect batman-adv header
+ * @skb: sk_buff to with the batman-adv header
+ * @key_control: flow dissectors control key
+ * @data: raw buffer pointer to the packet, if NULL use skb->data
+ * @p_proto: pointer used to update the protocol to process next
+ * @p_nhoff: pointer used to update inner network header offset
+ * @hlen: packet header length
+ * @flags: any combination of FLOW_DISSECTOR_F_*
+ *
+ * ETH_P_BATMAN packets are tried to be dissected. Only
+ * &struct batadv_unicast packets are actually processed because they contain an
+ * inner ethernet header and are usually followed by actual network header. This
+ * allows the flow dissector to continue processing the packet.
+ *
+ * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
+ *  FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
+ *  otherwise FLOW_DISSECT_RET_OUT_BAD
+ */
+static enum flow_dissect_ret
+__skb_flow_dissect_batadv(const struct sk_buff *skb,
+                         struct flow_dissector_key_control *key_control,
+                         void *data, __be16 *p_proto, int *p_nhoff, int hlen,
+                         unsigned int flags)
+{
+       struct {
+               struct batadv_unicast_packet batadv_unicast;
+               struct ethhdr eth;
+       } *hdr, _hdr;
+
+       hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
+                                  &_hdr);
+       if (!hdr)
+               return FLOW_DISSECT_RET_OUT_BAD;
+
+       if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
+               return FLOW_DISSECT_RET_OUT_BAD;
+
+       if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
+               return FLOW_DISSECT_RET_OUT_BAD;
+
+       *p_proto = hdr->eth.h_proto;
+       *p_nhoff += sizeof(*hdr);
+
+       key_control->flags |= FLOW_DIS_ENCAPSULATION;
+       if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+               return FLOW_DISSECT_RET_OUT_GOOD;
+
+       return FLOW_DISSECT_RET_PROTO_AGAIN;
+}
+
 static void
 __skb_flow_dissect_tcp(const struct sk_buff *skb,
                       struct flow_dissector *flow_dissector,
@@ -815,6 +867,11 @@ proto_again:
                                               nhoff, hlen);
                break;
 
+       case htons(ETH_P_BATMAN):
+               fdret = __skb_flow_dissect_batadv(skb, key_control, data,
+                                                 &proto, &nhoff, hlen, flags);
+               break;
+
        default:
                fdret = FLOW_DISSECT_RET_OUT_BAD;
                break;
index b797832..60a71be 100644 (file)
@@ -267,7 +267,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
        spin_lock_bh(&net->nsid_lock);
        peer = idr_find(&net->netns_ids, id);
        if (peer)
-               get_net(peer);
+               peer = maybe_get_net(peer);
        spin_unlock_bh(&net->nsid_lock);
        rcu_read_unlock();
 
index a592ca0..01e8285 100644 (file)
@@ -1177,12 +1177,12 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
        int i, new_frags;
        u32 d_off;
 
-       if (!num_frags)
-               return 0;
-
        if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
                return -EINVAL;
 
+       if (!num_frags)
+               goto release;
+
        new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        for (i = 0; i < new_frags; i++) {
                page = alloc_page(gfp_mask);
@@ -1238,6 +1238,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
        __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
        skb_shinfo(skb)->nr_frags = new_frags;
 
+release:
        skb_zcopy_clear(skb, false);
        return 0;
 }
@@ -3654,7 +3655,9 @@ normal:
 
                skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
                                              SKBTX_SHARED_FRAG;
-               if (skb_zerocopy_clone(nskb, head_skb, GFP_ATOMIC))
+
+               if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+                   skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
                        goto err;
 
                while (pos < offset + len) {
@@ -3668,6 +3671,11 @@ normal:
 
                                BUG_ON(!nfrags);
 
+                               if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+                                   skb_zerocopy_clone(nskb, frag_skb,
+                                                      GFP_ATOMIC))
+                                       goto err;
+
                                list_skb = list_skb->next;
                        }
 
@@ -3679,9 +3687,6 @@ normal:
                                goto err;
                        }
 
-                       if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
-                               goto err;
-
                        *nskb_frag = *frag;
                        __skb_frag_ref(nskb_frag);
                        size = skb_frag_size(nskb_frag);
diff --git a/net/core/xdp.c b/net/core/xdp.c
new file mode 100644 (file)
index 0000000..097a0f7
--- /dev/null
@@ -0,0 +1,73 @@
+/* net/core/xdp.c
+ *
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ * Released under terms in GPL version 2.  See COPYING.
+ */
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <net/xdp.h>
+
+#define REG_STATE_NEW          0x0
+#define REG_STATE_REGISTERED   0x1
+#define REG_STATE_UNREGISTERED 0x2
+#define REG_STATE_UNUSED       0x3
+
+void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
+{
+       /* Simplify driver cleanup code paths, allow unreg "unused" */
+       if (xdp_rxq->reg_state == REG_STATE_UNUSED)
+               return;
+
+       WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");
+
+       xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
+       xdp_rxq->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
+
+static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
+{
+       memset(xdp_rxq, 0, sizeof(*xdp_rxq));
+}
+
+/* Returns 0 on success, negative on failure */
+int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+                    struct net_device *dev, u32 queue_index)
+{
+       if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
+               WARN(1, "Driver promised not to register this");
+               return -EINVAL;
+       }
+
+       if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
+               WARN(1, "Missing unregister, handled but fix driver");
+               xdp_rxq_info_unreg(xdp_rxq);
+       }
+
+       if (!dev) {
+               WARN(1, "Missing net_device from driver");
+               return -ENODEV;
+       }
+
+       /* State either UNREGISTERED or NEW */
+       xdp_rxq_info_init(xdp_rxq);
+       xdp_rxq->dev = dev;
+       xdp_rxq->queue_index = queue_index;
+
+       xdp_rxq->reg_state = REG_STATE_REGISTERED;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
+
+void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
+{
+       xdp_rxq->reg_state = REG_STATE_UNUSED;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
+
+bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
+{
+       return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
index 8c0ef71..b270e84 100644 (file)
@@ -39,23 +39,6 @@ config IP_DCCP_DEBUG
 
          Just say N.
 
-config NET_DCCPPROBE
-       tristate "DCCP connection probing"
-       depends on PROC_FS && KPROBES
-       ---help---
-       This module allows for capturing the changes to DCCP connection
-       state in response to incoming packets. It is used for debugging
-       DCCP congestion avoidance modules. If you don't understand
-       what was just said, you don't need it: say N.
-
-       Documentation on how to use DCCP connection probing can be found
-       at:
-       
-         http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe
-
-       To compile this code as a module, choose M here: the
-       module will be called dccp_probe.
-
 
 endmenu
 
index 2e7b560..5b4ff37 100644 (file)
@@ -21,9 +21,10 @@ obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o
 dccp_ipv6-y := ipv6.o
 
 obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o
-obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o
 
 dccp-$(CONFIG_SYSCTL) += sysctl.o
 
 dccp_diag-y := diag.o
-dccp_probe-y := probe.o
+
+# build with local directory for trace.h
+CFLAGS_proto.o := -I$(src)
index 3de0d03..2a24f7d 100644 (file)
@@ -228,7 +228,7 @@ static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
        }
 
        if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
-               DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
+               DCCP_CRIT("Ack Vector buffer overflow: dropping old entries");
                av->av_overflow = true;
        }
 
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
deleted file mode 100644 (file)
index 3d3fda0..0000000
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * dccp_probe - Observe the DCCP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * Modified for DCCP from Stephen Hemminger's code
- * Copyright (C) 2006, Ian McDonald <ian.mcdonald@jandi.co.nz>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/dccp.h>
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/kfifo.h>
-#include <linux/vmalloc.h>
-#include <linux/time64.h>
-#include <linux/gfp.h>
-#include <net/net_namespace.h>
-
-#include "dccp.h"
-#include "ccid.h"
-#include "ccids/ccid3.h"
-
-static int port;
-
-static int bufsize = 64 * 1024;
-
-static const char procname[] = "dccpprobe";
-
-static struct {
-       struct kfifo      fifo;
-       spinlock_t        lock;
-       wait_queue_head_t wait;
-       struct timespec64 tstart;
-} dccpw;
-
-static void printl(const char *fmt, ...)
-{
-       va_list args;
-       int len;
-       struct timespec64 now;
-       char tbuf[256];
-
-       va_start(args, fmt);
-       getnstimeofday64(&now);
-
-       now = timespec64_sub(now, dccpw.tstart);
-
-       len = sprintf(tbuf, "%lu.%06lu ",
-                     (unsigned long) now.tv_sec,
-                     (unsigned long) now.tv_nsec / NSEC_PER_USEC);
-       len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
-       va_end(args);
-
-       kfifo_in_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
-       wake_up(&dccpw.wait);
-}
-
-static int jdccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
-{
-       const struct inet_sock *inet = inet_sk(sk);
-       struct ccid3_hc_tx_sock *hc = NULL;
-
-       if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3)
-               hc = ccid3_hc_tx_sk(sk);
-
-       if (port == 0 || ntohs(inet->inet_dport) == port ||
-           ntohs(inet->inet_sport) == port) {
-               if (hc)
-                       printl("%pI4:%u %pI4:%u %d %d %d %d %u %llu %llu %d\n",
-                              &inet->inet_saddr, ntohs(inet->inet_sport),
-                              &inet->inet_daddr, ntohs(inet->inet_dport), size,
-                              hc->tx_s, hc->tx_rtt, hc->tx_p,
-                              hc->tx_x_calc, hc->tx_x_recv >> 6,
-                              hc->tx_x >> 6, hc->tx_t_ipi);
-               else
-                       printl("%pI4:%u %pI4:%u %d\n",
-                              &inet->inet_saddr, ntohs(inet->inet_sport),
-                              &inet->inet_daddr, ntohs(inet->inet_dport),
-                              size);
-       }
-
-       jprobe_return();
-       return 0;
-}
-
-static struct jprobe dccp_send_probe = {
-       .kp     = {
-               .symbol_name = "dccp_sendmsg",
-       },
-       .entry  = jdccp_sendmsg,
-};
-
-static int dccpprobe_open(struct inode *inode, struct file *file)
-{
-       kfifo_reset(&dccpw.fifo);
-       getnstimeofday64(&dccpw.tstart);
-       return 0;
-}
-
-static ssize_t dccpprobe_read(struct file *file, char __user *buf,
-                             size_t len, loff_t *ppos)
-{
-       int error = 0, cnt = 0;
-       unsigned char *tbuf;
-
-       if (!buf)
-               return -EINVAL;
-
-       if (len == 0)
-               return 0;
-
-       tbuf = vmalloc(len);
-       if (!tbuf)
-               return -ENOMEM;
-
-       error = wait_event_interruptible(dccpw.wait,
-                                        kfifo_len(&dccpw.fifo) != 0);
-       if (error)
-               goto out_free;
-
-       cnt = kfifo_out_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
-       error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
-
-out_free:
-       vfree(tbuf);
-
-       return error ? error : cnt;
-}
-
-static const struct file_operations dccpprobe_fops = {
-       .owner   = THIS_MODULE,
-       .open    = dccpprobe_open,
-       .read    = dccpprobe_read,
-       .llseek  = noop_llseek,
-};
-
-static __init int dccpprobe_init(void)
-{
-       int ret = -ENOMEM;
-
-       init_waitqueue_head(&dccpw.wait);
-       spin_lock_init(&dccpw.lock);
-       if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL))
-               return ret;
-       if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
-               goto err0;
-
-       ret = register_jprobe(&dccp_send_probe);
-       if (ret) {
-               ret = request_module("dccp");
-               if (!ret)
-                       ret = register_jprobe(&dccp_send_probe);
-       }
-
-       if (ret)
-               goto err1;
-
-       pr_info("DCCP watch registered (port=%d)\n", port);
-       return 0;
-err1:
-       remove_proc_entry(procname, init_net.proc_net);
-err0:
-       kfifo_free(&dccpw.fifo);
-       return ret;
-}
-module_init(dccpprobe_init);
-
-static __exit void dccpprobe_exit(void)
-{
-       kfifo_free(&dccpw.fifo);
-       remove_proc_entry(procname, init_net.proc_net);
-       unregister_jprobe(&dccp_send_probe);
-
-}
-module_exit(dccpprobe_exit);
-
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
-module_param(bufsize, int, 0);
-
-MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>");
-MODULE_DESCRIPTION("DCCP snooper");
-MODULE_LICENSE("GPL");
index 9d43c1f..fa7e92e 100644 (file)
@@ -38,6 +38,9 @@
 #include "dccp.h"
 #include "feat.h"
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
 
 EXPORT_SYMBOL_GPL(dccp_statistics);
@@ -110,7 +113,7 @@ void dccp_set_state(struct sock *sk, const int state)
        /* Change state AFTER socket is unhashed to avoid closed
         * socket sitting in hash tables.
         */
-       sk->sk_state = state;
+       inet_sk_set_state(sk, state);
 }
 
 EXPORT_SYMBOL_GPL(dccp_set_state);
@@ -761,6 +764,8 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        int rc, size;
        long timeo;
 
+       trace_dccp_probe(sk, len);
+
        if (len > dp->dccps_mss_cache)
                return -EMSGSIZE;
 
diff --git a/net/dccp/trace.h b/net/dccp/trace.h
new file mode 100644 (file)
index 0000000..5062421
--- /dev/null
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dccp
+
+#if !defined(_TRACE_DCCP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DCCP_H
+
+#include <net/sock.h>
+#include "dccp.h"
+#include "ccids/ccid3.h"
+#include <linux/tracepoint.h>
+#include <trace/events/net_probe_common.h>
+
+TRACE_EVENT(dccp_probe,
+
+       TP_PROTO(struct sock *sk, size_t size),
+
+       TP_ARGS(sk, size),
+
+       TP_STRUCT__entry(
+               /* sockaddr_in6 is always bigger than sockaddr_in */
+               __array(__u8, saddr, sizeof(struct sockaddr_in6))
+               __array(__u8, daddr, sizeof(struct sockaddr_in6))
+               __field(__u16, sport)
+               __field(__u16, dport)
+               __field(__u16, size)
+               __field(__u16, tx_s)
+               __field(__u32, tx_rtt)
+               __field(__u32, tx_p)
+               __field(__u32, tx_x_calc)
+               __field(__u64, tx_x_recv)
+               __field(__u64, tx_x)
+               __field(__u32, tx_t_ipi)
+       ),
+
+       TP_fast_assign(
+               const struct inet_sock *inet = inet_sk(sk);
+               struct ccid3_hc_tx_sock *hc = NULL;
+
+               if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3)
+                       hc = ccid3_hc_tx_sk(sk);
+
+               memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+               memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+               TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+               /* For filtering use */
+               __entry->sport = ntohs(inet->inet_sport);
+               __entry->dport = ntohs(inet->inet_dport);
+
+               __entry->size = size;
+               if (hc) {
+                       __entry->tx_s = hc->tx_s;
+                       __entry->tx_rtt = hc->tx_rtt;
+                       __entry->tx_p = hc->tx_p;
+                       __entry->tx_x_calc = hc->tx_x_calc;
+                       __entry->tx_x_recv = hc->tx_x_recv >> 6;
+                       __entry->tx_x = hc->tx_x >> 6;
+                       __entry->tx_t_ipi = hc->tx_t_ipi;
+               } else {
+                       __entry->tx_s = 0;
+                       memset(&__entry->tx_rtt, 0, (void *)&__entry->tx_t_ipi -
+                              (void *)&__entry->tx_rtt +
+                              sizeof(__entry->tx_t_ipi));
+               }
+       ),
+
+       TP_printk("src=%pISpc dest=%pISpc size=%d tx_s=%d tx_rtt=%d "
+                 "tx_p=%d tx_x_calc=%u tx_x_recv=%llu tx_x=%llu tx_t_ipi=%d",
+                 __entry->saddr, __entry->daddr, __entry->size,
+                 __entry->tx_s, __entry->tx_rtt, __entry->tx_p,
+                 __entry->tx_x_calc, __entry->tx_x_recv, __entry->tx_x,
+                 __entry->tx_t_ipi)
+);
+
+#endif /* _TRACE_TCP_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
index b03665e..cefb0c3 100644 (file)
@@ -103,7 +103,7 @@ void dsa_legacy_unregister(void);
 #else
 static inline int dsa_legacy_register(void)
 {
-       return -ENODEV;
+       return 0;
 }
 
 static inline void dsa_legacy_unregister(void) { }
index e6e0b7b..2b06bb9 100644 (file)
@@ -70,6 +70,18 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
        if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
                return NULL;
 
+       /* The Ethernet switch we are interfaced with needs packets to be at
+        * least 64 bytes (including FCS) otherwise they will be discarded when
+        * they enter the switch port logic. When Broadcom tags are enabled, we
+        * need to make sure that packets are at least 68 bytes
+        * (including FCS and tag) because the length verification is done after
+        * the Broadcom tag is stripped off the ingress packet.
+        *
+        * Let dsa_slave_xmit() free the SKB
+        */
+       if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
+               return NULL;
+
        skb_push(skb, BRCM_TAG_LEN);
 
        if (offset)
index c6c8ad1..47a0a66 100644 (file)
@@ -43,7 +43,6 @@ obj-$(CONFIG_INET_DIAG) += inet_diag.o
 obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
 obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
 obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o
-obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
 obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o
 obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
 obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
index f00499a..54cccdd 100644 (file)
 #endif
 #include <net/l3mdev.h>
 
+#include <trace/events/sock.h>
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
@@ -789,7 +790,8 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        int addr_len = 0;
        int err;
 
-       sock_rps_record_flow(sk);
+       if (likely(!(flags & MSG_ERRQUEUE)))
+               sock_rps_record_flow(sk);
 
        err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
                                   flags & ~MSG_DONTWAIT, &addr_len);
@@ -1220,6 +1222,19 @@ int inet_sk_rebuild_header(struct sock *sk)
 }
 EXPORT_SYMBOL(inet_sk_rebuild_header);
 
+void inet_sk_set_state(struct sock *sk, int state)
+{
+       trace_inet_sock_set_state(sk, sk->sk_state, state);
+       sk->sk_state = state;
+}
+EXPORT_SYMBOL(inet_sk_set_state);
+
+void inet_sk_state_store(struct sock *sk, int newstate)
+{
+       trace_inet_sock_set_state(sk, sk->sk_state, newstate);
+       smp_store_release(&sk->sk_state, newstate);
+}
+
 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                 netdev_features_t features)
 {
index d57aa64..6f00e43 100644 (file)
@@ -121,14 +121,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
        struct sk_buff *skb = base->data;
+       struct xfrm_offload *xo = xfrm_offload(skb);
        void *tmp;
-       struct dst_entry *dst = skb_dst(skb);
-       struct xfrm_state *x = dst->xfrm;
+       struct xfrm_state *x;
+
+       if (xo && (xo->flags & XFRM_DEV_RESUME))
+               x = skb->sp->xvec[skb->sp->len - 1];
+       else
+               x = skb_dst(skb)->xfrm;
 
        tmp = ESP_SKB_CB(skb)->tmp;
        esp_ssg_unref(x, tmp);
        kfree(tmp);
-       xfrm_output_resume(skb, err);
+
+       if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+               if (err) {
+                       XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                       kfree_skb(skb);
+                       return;
+               }
+
+               skb_push(skb, skb->data - skb_mac_header(skb));
+               secpath_reset(skb);
+               xfrm_dev_resume(skb);
+       } else {
+               xfrm_output_resume(skb, err);
+       }
 }
 
 /* Move ESP header back into place. */
@@ -825,17 +843,13 @@ static int esp_init_aead(struct xfrm_state *x)
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;
-       u32 mask = 0;
 
        err = -ENAMETOOLONG;
        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                goto error;
 
-       if (x->xso.offload_handle)
-               mask |= CRYPTO_ALG_ASYNC;
-
-       aead = crypto_alloc_aead(aead_name, 0, mask);
+       aead = crypto_alloc_aead(aead_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;
@@ -865,7 +879,6 @@ static int esp_init_authenc(struct xfrm_state *x)
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;
-       u32 mask = 0;
 
        err = -EINVAL;
        if (!x->ealg)
@@ -891,10 +904,7 @@ static int esp_init_authenc(struct xfrm_state *x)
                        goto error;
        }
 
-       if (x->xso.offload_handle)
-               mask |= CRYPTO_ALG_ASYNC;
-
-       aead = crypto_alloc_aead(authenc_name, 0, mask);
+       aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;
index f8b918c..c359f3c 100644 (file)
@@ -108,75 +108,36 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
 {
-       __u32 seq;
-       int err = 0;
-       struct sk_buff *skb2;
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
-       struct sk_buff *segs = ERR_PTR(-EINVAL);
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (!xo)
-               goto out;
-
-       seq = xo->seq.low;
+               return ERR_PTR(-EINVAL);
 
        x = skb->sp->xvec[skb->sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);
 
        if (esph->spi != x->id.spi)
-               goto out;
+               return ERR_PTR(-EINVAL);
 
        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-               goto out;
+               return ERR_PTR(-EINVAL);
 
        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
        skb->encap_hdr_csum = 1;
 
-       if (!(features & NETIF_F_HW_ESP))
+       if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+           (x->xso.dev != skb->dev))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-       segs = x->outer_mode->gso_segment(x, skb, esp_features);
-       if (IS_ERR_OR_NULL(segs))
-               goto out;
-
-       __skb_pull(skb, skb->data - skb_mac_header(skb));
-
-       skb2 = segs;
-       do {
-               struct sk_buff *nskb = skb2->next;
-
-               xo = xfrm_offload(skb2);
-               xo->flags |= XFRM_GSO_SEGMENT;
-               xo->seq.low = seq;
-               xo->seq.hi = xfrm_replay_seqhi(x, seq);
+       xo->flags |= XFRM_GSO_SEGMENT;
 
-               if(!(features & NETIF_F_HW_ESP))
-                       xo->flags |= CRYPTO_FALLBACK;
-
-               x->outer_mode->xmit(x, skb2);
-
-               err = x->type_offload->xmit(x, skb2, esp_features);
-               if (err) {
-                       kfree_skb_list(segs);
-                       return ERR_PTR(err);
-               }
-
-               if (!skb_is_gso(skb2))
-                       seq++;
-               else
-                       seq += skb_shinfo(skb2)->gso_segs;
-
-               skb_push(skb2, skb2->mac_len);
-               skb2 = nskb;
-       } while (skb2);
-
-out:
-       return segs;
+       return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -203,6 +164,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
+       __u32 seq;
 
        esp.inplace = true;
 
@@ -241,23 +203,30 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
                        return esp.nfrags;
        }
 
+       seq = xo->seq.low;
+
        esph = esp.esph;
        esph->spi = x->id.spi;
 
        skb_push(skb, -skb_network_offset(skb));
 
        if (xo->flags & XFRM_GSO_SEGMENT) {
-               esph->seq_no = htonl(xo->seq.low);
-       } else {
-               ip_hdr(skb)->tot_len = htons(skb->len);
-               ip_send_check(ip_hdr(skb));
+               esph->seq_no = htonl(seq);
+
+               if (!skb_is_gso(skb))
+                       xo->seq.low++;
+               else
+                       xo->seq.low += skb_shinfo(skb)->gso_segs;
        }
 
+       esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
+
+       ip_hdr(skb)->tot_len = htons(skb->len);
+       ip_send_check(ip_hdr(skb));
+
        if (hw_offload)
                return 0;
 
-       esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;
index f52d27a..08259d0 100644 (file)
@@ -1298,14 +1298,19 @@ err_table_hash_alloc:
 
 static void ip_fib_net_exit(struct net *net)
 {
-       unsigned int i;
+       int i;
 
        rtnl_lock();
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
        RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
 #endif
-       for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
+       /* Destroy the tables in reverse order to guarantee that the
+        * local table, ID 255, is destroyed before the main table, ID
+        * 254. This is necessary as the local table may contain
+        * references to data contained in the main table.
+        */
+       for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
                struct hlist_head *head = &net->ipv4.fib_table_hash[i];
                struct hlist_node *tmp;
                struct fib_table *tb;
index f04d944..c586597 100644 (file)
@@ -698,7 +698,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
 
        nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
                int type = nla_type(nla);
-               u32 val;
+               u32 fi_val, val;
 
                if (!type)
                        continue;
@@ -715,7 +715,11 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
                        val = nla_get_u32(nla);
                }
 
-               if (fi->fib_metrics->metrics[type - 1] != val)
+               fi_val = fi->fib_metrics->metrics[type - 1];
+               if (type == RTAX_FEATURES)
+                       fi_val &= ~DST_FEATURE_ECN_CA;
+
+               if (fi_val != val)
                        return false;
        }
 
index 4ca46dc..12410ec 100644 (file)
@@ -685,7 +685,7 @@ static void reqsk_timer_handler(struct timer_list *t)
        int max_retries, thresh;
        u8 defer_accept;
 
-       if (sk_state_load(sk_listener) != TCP_LISTEN)
+       if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
                goto drop;
 
        max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
@@ -783,7 +783,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
        if (newsk) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);
 
-               newsk->sk_state = TCP_SYN_RECV;
+               inet_sk_set_state(newsk, TCP_SYN_RECV);
                newicsk->icsk_bind_hash = NULL;
 
                inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
@@ -877,7 +877,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
         * It is OK, because this socket enters to hash table only
         * after validation is complete.
         */
-       sk_state_store(sk, TCP_LISTEN);
+       inet_sk_state_store(sk, TCP_LISTEN);
        if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
                inet->inet_sport = htons(inet->inet_num);
 
@@ -888,7 +888,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
                        return 0;
        }
 
-       sk->sk_state = TCP_CLOSE;
+       inet_sk_set_state(sk, TCP_CLOSE);
        return err;
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
index c9c35b6..a383f29 100644 (file)
@@ -564,12 +564,18 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
                case INET_DIAG_BC_JMP:
                        yes = 0;
                        break;
+               case INET_DIAG_BC_S_EQ:
+                       yes = entry->sport == op[1].no;
+                       break;
                case INET_DIAG_BC_S_GE:
                        yes = entry->sport >= op[1].no;
                        break;
                case INET_DIAG_BC_S_LE:
                        yes = entry->sport <= op[1].no;
                        break;
+               case INET_DIAG_BC_D_EQ:
+                       yes = entry->dport == op[1].no;
+                       break;
                case INET_DIAG_BC_D_GE:
                        yes = entry->dport >= op[1].no;
                        break;
@@ -802,8 +808,10 @@ static int inet_diag_bc_audit(const struct nlattr *attr,
                        if (!valid_devcond(bc, len, &min_len))
                                return -EINVAL;
                        break;
+               case INET_DIAG_BC_S_EQ:
                case INET_DIAG_BC_S_GE:
                case INET_DIAG_BC_S_LE:
+               case INET_DIAG_BC_D_EQ:
                case INET_DIAG_BC_D_GE:
                case INET_DIAG_BC_D_LE:
                        if (!valid_port_comparison(bc, len, &min_len))
index f6f5810..37b7da0 100644 (file)
@@ -544,7 +544,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        } else {
                percpu_counter_inc(sk->sk_prot->orphan_count);
-               sk->sk_state = TCP_CLOSE;
+               inet_sk_set_state(sk, TCP_CLOSE);
                sock_set_flag(sk, SOCK_DEAD);
                inet_csk_destroy_sock(sk);
        }
index fd4d6e9..b61f228 100644 (file)
@@ -313,9 +313,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                                return PACKET_REJECT;
 
                        md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
-                       if (!md)
-                               return PACKET_REJECT;
-
                        memcpy(md, pkt_md, sizeof(*md));
                        md->version = ver;
 
@@ -434,11 +431,13 @@ static int gre_rcv(struct sk_buff *skb)
                     tpi.proto == htons(ETH_P_ERSPAN2))) {
                if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                        return 0;
+               goto out;
        }
 
        if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
                return 0;
 
+out:
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 drop:
        kfree_skb(skb);
@@ -1332,6 +1331,7 @@ static const struct net_device_ops erspan_netdev_ops = {
 static void ipgre_tap_setup(struct net_device *dev)
 {
        ether_setup(dev);
+       dev->max_mtu = 0;
        dev->netdev_ops = &gre_tap_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
index c470fec..f68cb33 100644 (file)
 #include <asm/ioctls.h>
 #include <net/busy_poll.h>
 
-#include <trace/events/tcp.h>
-
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
@@ -500,11 +498,9 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
        const struct tcp_sock *tp = tcp_sk(sk);
        int state;
 
-       sock_rps_record_flow(sk);
-
        sock_poll_wait(file, sk_sleep(sk), wait);
 
-       state = sk_state_load(sk);
+       state = inet_sk_state_load(sk);
        if (state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
@@ -1106,12 +1102,15 @@ static int linear_payload_sz(bool first_skb)
        return 0;
 }
 
-static int select_size(const struct sock *sk, bool sg, bool first_skb)
+static int select_size(const struct sock *sk, bool sg, bool first_skb, bool zc)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        int tmp = tp->mss_cache;
 
        if (sg) {
+               if (zc)
+                       return 0;
+
                if (sk_can_gso(sk)) {
                        tmp = linear_payload_sz(first_skb);
                } else {
@@ -1188,7 +1187,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
        int flags, err, copied = 0;
        int mss_now = 0, size_goal, copied_syn = 0;
        bool process_backlog = false;
-       bool sg;
+       bool sg, zc = false;
        long timeo;
 
        flags = msg->msg_flags;
@@ -1206,7 +1205,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
                        goto out_err;
                }
 
-               if (!(sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG))
+               zc = sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG;
+               if (!zc)
                        uarg->zerocopy = 0;
        }
 
@@ -1283,6 +1283,7 @@ restart:
 
                if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
                        bool first_skb;
+                       int linear;
 
 new_segment:
                        /* Allocate new segment. If the interface is SG,
@@ -1296,9 +1297,8 @@ new_segment:
                                goto restart;
                        }
                        first_skb = tcp_rtx_and_write_queues_empty(sk);
-                       skb = sk_stream_alloc_skb(sk,
-                                                 select_size(sk, sg, first_skb),
-                                                 sk->sk_allocation,
+                       linear = select_size(sk, sg, first_skb, zc);
+                       skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
                                                  first_skb);
                        if (!skb)
                                goto wait_for_memory;
@@ -1327,13 +1327,13 @@ new_segment:
                        copy = msg_data_left(msg);
 
                /* Where to copy to? */
-               if (skb_availroom(skb) > 0) {
+               if (skb_availroom(skb) > 0 && !zc) {
                        /* We have some space in skb head. Superb! */
                        copy = min_t(int, copy, skb_availroom(skb));
                        err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
                        if (err)
                                goto do_fault;
-               } else if (!uarg || !uarg->zerocopy) {
+               } else if (!zc) {
                        bool merge = true;
                        int i = skb_shinfo(skb)->nr_frags;
                        struct page_frag *pfrag = sk_page_frag(sk);
@@ -1373,8 +1373,10 @@ new_segment:
                        pfrag->offset += copy;
                } else {
                        err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
-                       if (err == -EMSGSIZE || err == -EEXIST)
+                       if (err == -EMSGSIZE || err == -EEXIST) {
+                               tcp_mark_push(tp, skb);
                                goto new_segment;
+                       }
                        if (err < 0)
                                goto do_error;
                        copy = err;
@@ -2040,8 +2042,6 @@ void tcp_set_state(struct sock *sk, int state)
 {
        int oldstate = sk->sk_state;
 
-       trace_tcp_set_state(sk, oldstate, state);
-
        switch (state) {
        case TCP_ESTABLISHED:
                if (oldstate != TCP_ESTABLISHED)
@@ -2065,7 +2065,7 @@ void tcp_set_state(struct sock *sk, int state)
        /* Change state AFTER socket is unhashed to avoid closed
         * socket sitting in hash tables.
         */
-       sk_state_store(sk, state);
+       inet_sk_state_store(sk, state);
 
 #ifdef STATE_TRACE
        SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2920,7 +2920,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        if (sk->sk_type != SOCK_STREAM)
                return;
 
-       info->tcpi_state = sk_state_load(sk);
+       info->tcpi_state = inet_sk_state_load(sk);
 
        /* Report meaningful fields for all TCP states, including listeners */
        rate = READ_ONCE(sk->sk_pacing_rate);
index abbf0ed..81148f7 100644 (file)
@@ -24,7 +24,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 {
        struct tcp_info *info = _info;
 
-       if (sk_state_load(sk) == TCP_LISTEN) {
+       if (inet_sk_state_load(sk) == TCP_LISTEN) {
                r->idiag_rqueue = sk->sk_ack_backlog;
                r->idiag_wqueue = sk->sk_max_ack_backlog;
        } else if (sk->sk_type == SOCK_STREAM) {
index 4d55c4b..ff71b18 100644 (file)
@@ -5299,6 +5299,9 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
        unsigned int len = skb->len;
        struct tcp_sock *tp = tcp_sk(sk);
 
+       /* TCP congestion window tracking */
+       trace_tcp_probe(sk, skb);
+
        tcp_mstamp_refresh(tp);
        if (unlikely(!sk->sk_rx_dst))
                inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
index 94e2835..5d20324 100644 (file)
@@ -1911,7 +1911,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
        /* Clean up the MD5 key list, if any */
        if (tp->md5sig_info) {
                tcp_clear_md5_list(sk);
-               kfree_rcu(tp->md5sig_info, rcu);
+               kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
                tp->md5sig_info = NULL;
        }
 #endif
@@ -2281,7 +2281,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
                timer_expires = jiffies;
        }
 
-       state = sk_state_load(sk);
+       state = inet_sk_state_load(sk);
        if (state == TCP_LISTEN)
                rx_queue = sk->sk_ack_backlog;
        else
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
deleted file mode 100644 (file)
index 697f4c6..0000000
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * tcpprobe - Observe the TCP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/tcp.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/ktime.h>
-#include <linux/time.h>
-#include <net/net_namespace.h>
-
-#include <net/tcp.h>
-
-MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
-MODULE_DESCRIPTION("TCP cwnd snooper");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.1");
-
-static int port __read_mostly;
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-static unsigned int bufsize __read_mostly = 4096;
-MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
-module_param(bufsize, uint, 0);
-
-static unsigned int fwmark __read_mostly;
-MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
-module_param(fwmark, uint, 0);
-
-static int full __read_mostly;
-MODULE_PARM_DESC(full, "Full log (1=every ack packet received,  0=only cwnd changes)");
-module_param(full, int, 0);
-
-static const char procname[] = "tcpprobe";
-
-struct tcp_log {
-       ktime_t tstamp;
-       union {
-               struct sockaddr         raw;
-               struct sockaddr_in      v4;
-               struct sockaddr_in6     v6;
-       }       src, dst;
-       u16     length;
-       u32     snd_nxt;
-       u32     snd_una;
-       u32     snd_wnd;
-       u32     rcv_wnd;
-       u32     snd_cwnd;
-       u32     ssthresh;
-       u32     srtt;
-};
-
-static struct {
-       spinlock_t      lock;
-       wait_queue_head_t wait;
-       ktime_t         start;
-       u32             lastcwnd;
-
-       unsigned long   head, tail;
-       struct tcp_log  *log;
-} tcp_probe;
-
-static inline int tcp_probe_used(void)
-{
-       return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1);
-}
-
-static inline int tcp_probe_avail(void)
-{
-       return bufsize - tcp_probe_used() - 1;
-}
-
-#define tcp_probe_copy_fl_to_si4(inet, si4, mem)               \
-       do {                                                    \
-               si4.sin_family = AF_INET;                       \
-               si4.sin_port = inet->inet_##mem##port;          \
-               si4.sin_addr.s_addr = inet->inet_##mem##addr;   \
-       } while (0)                                             \
-
-/*
- * Hook inserted to be called before each receive packet.
- * Note: arguments must match tcp_rcv_established()!
- */
-static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-                                const struct tcphdr *th)
-{
-       unsigned int len = skb->len;
-       const struct tcp_sock *tp = tcp_sk(sk);
-       const struct inet_sock *inet = inet_sk(sk);
-
-       /* Only update if port or skb mark matches */
-       if (((port == 0 && fwmark == 0) ||
-            ntohs(inet->inet_dport) == port ||
-            ntohs(inet->inet_sport) == port ||
-            (fwmark > 0 && skb->mark == fwmark)) &&
-           (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
-
-               spin_lock(&tcp_probe.lock);
-               /* If log fills, just silently drop */
-               if (tcp_probe_avail() > 1) {
-                       struct tcp_log *p = tcp_probe.log + tcp_probe.head;
-
-                       p->tstamp = ktime_get();
-                       switch (sk->sk_family) {
-                       case AF_INET:
-                               tcp_probe_copy_fl_to_si4(inet, p->src.v4, s);
-                               tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
-                               break;
-                       case AF_INET6:
-                               memset(&p->src.v6, 0, sizeof(p->src.v6));
-                               memset(&p->dst.v6, 0, sizeof(p->dst.v6));
-#if IS_ENABLED(CONFIG_IPV6)
-                               p->src.v6.sin6_family = AF_INET6;
-                               p->src.v6.sin6_port = inet->inet_sport;
-                               p->src.v6.sin6_addr = inet6_sk(sk)->saddr;
-
-                               p->dst.v6.sin6_family = AF_INET6;
-                               p->dst.v6.sin6_port = inet->inet_dport;
-                               p->dst.v6.sin6_addr = sk->sk_v6_daddr;
-#endif
-                               break;
-                       default:
-                               BUG();
-                       }
-
-                       p->length = len;
-                       p->snd_nxt = tp->snd_nxt;
-                       p->snd_una = tp->snd_una;
-                       p->snd_cwnd = tp->snd_cwnd;
-                       p->snd_wnd = tp->snd_wnd;
-                       p->rcv_wnd = tp->rcv_wnd;
-                       p->ssthresh = tcp_current_ssthresh(sk);
-                       p->srtt = tp->srtt_us >> 3;
-
-                       tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
-               }
-               tcp_probe.lastcwnd = tp->snd_cwnd;
-               spin_unlock(&tcp_probe.lock);
-
-               wake_up(&tcp_probe.wait);
-       }
-
-       jprobe_return();
-}
-
-static struct jprobe tcp_jprobe = {
-       .kp = {
-               .symbol_name    = "tcp_rcv_established",
-       },
-       .entry  = jtcp_rcv_established,
-};
-
-static int tcpprobe_open(struct inode *inode, struct file *file)
-{
-       /* Reset (empty) log */
-       spin_lock_bh(&tcp_probe.lock);
-       tcp_probe.head = tcp_probe.tail = 0;
-       tcp_probe.start = ktime_get();
-       spin_unlock_bh(&tcp_probe.lock);
-
-       return 0;
-}
-
-static int tcpprobe_sprint(char *tbuf, int n)
-{
-       const struct tcp_log *p
-               = tcp_probe.log + tcp_probe.tail;
-       struct timespec64 ts
-               = ktime_to_timespec64(ktime_sub(p->tstamp, tcp_probe.start));
-
-       return scnprintf(tbuf, n,
-                       "%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n",
-                       (unsigned long)ts.tv_sec,
-                       (unsigned long)ts.tv_nsec,
-                       &p->src, &p->dst, p->length, p->snd_nxt, p->snd_una,
-                       p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd);
-}
-
-static ssize_t tcpprobe_read(struct file *file, char __user *buf,
-                            size_t len, loff_t *ppos)
-{
-       int error = 0;
-       size_t cnt = 0;
-
-       if (!buf)
-               return -EINVAL;
-
-       while (cnt < len) {
-               char tbuf[256];
-               int width;
-
-               /* Wait for data in buffer */
-               error = wait_event_interruptible(tcp_probe.wait,
-                                                tcp_probe_used() > 0);
-               if (error)
-                       break;
-
-               spin_lock_bh(&tcp_probe.lock);
-               if (tcp_probe.head == tcp_probe.tail) {
-                       /* multiple readers race? */
-                       spin_unlock_bh(&tcp_probe.lock);
-                       continue;
-               }
-
-               width = tcpprobe_sprint(tbuf, sizeof(tbuf));
-
-               if (cnt + width < len)
-                       tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1);
-
-               spin_unlock_bh(&tcp_probe.lock);
-
-               /* if record greater than space available
-                  return partial buffer (so far) */
-               if (cnt + width >= len)
-                       break;
-
-               if (copy_to_user(buf + cnt, tbuf, width))
-                       return -EFAULT;
-               cnt += width;
-       }
-
-       return cnt == 0 ? error : cnt;
-}
-
-static const struct file_operations tcpprobe_fops = {
-       .owner   = THIS_MODULE,
-       .open    = tcpprobe_open,
-       .read    = tcpprobe_read,
-       .llseek  = noop_llseek,
-};
-
-static __init int tcpprobe_init(void)
-{
-       int ret = -ENOMEM;
-
-       /* Warning: if the function signature of tcp_rcv_established,
-        * has been changed, you also have to change the signature of
-        * jtcp_rcv_established, otherwise you end up right here!
-        */
-       BUILD_BUG_ON(__same_type(tcp_rcv_established,
-                                jtcp_rcv_established) == 0);
-
-       init_waitqueue_head(&tcp_probe.wait);
-       spin_lock_init(&tcp_probe.lock);
-
-       if (bufsize == 0)
-               return -EINVAL;
-
-       bufsize = roundup_pow_of_two(bufsize);
-       tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
-       if (!tcp_probe.log)
-               goto err0;
-
-       if (!proc_create(procname, S_IRUSR, init_net.proc_net, &tcpprobe_fops))
-               goto err0;
-
-       ret = register_jprobe(&tcp_jprobe);
-       if (ret)
-               goto err1;
-
-       pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
-               port, fwmark, bufsize);
-       return 0;
- err1:
-       remove_proc_entry(procname, init_net.proc_net);
- err0:
-       kfree(tcp_probe.log);
-       return ret;
-}
-module_init(tcpprobe_init);
-
-static __exit void tcpprobe_exit(void)
-{
-       remove_proc_entry(procname, init_net.proc_net);
-       unregister_jprobe(&tcp_jprobe);
-       kfree(tcp_probe.log);
-}
-module_exit(tcpprobe_exit);
index e9c0d1e..db72619 100644 (file)
@@ -2490,8 +2490,6 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
        if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
                mask |= POLLIN | POLLRDNORM;
 
-       sock_rps_record_flow(sk);
-
        /* Check for false positives due to checksum errors */
        if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
            !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
index e50b7fe..bcfc00e 100644 (file)
@@ -23,6 +23,12 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
        return xfrm4_extract_header(skb);
 }
 
+static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk,
+                                  struct sk_buff *skb)
+{
+       return dst_input(skb);
+}
+
 static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
                                         struct sk_buff *skb)
 {
@@ -33,7 +39,11 @@ static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
                                         iph->tos, skb->dev))
                        goto drop;
        }
-       return dst_input(skb);
+
+       if (xfrm_trans_queue(skb, xfrm4_rcv_encap_finish2))
+               goto drop;
+
+       return 0;
 drop:
        kfree_skb(skb);
        return NET_RX_DROP;
index 7d885a4..8affc6d 100644 (file)
@@ -105,18 +105,15 @@ static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
        __skb_push(skb, skb->mac_len);
        return skb_mac_gso_segment(skb, features);
-
 }
 
 static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
        struct xfrm_offload *xo = xfrm_offload(skb);
 
-       if (xo->flags & XFRM_GSO_SEGMENT) {
-               skb->network_header = skb->network_header - x->props.header_len;
+       if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header +
                                        sizeof(struct iphdr);
-       }
 
        skb_reset_mac_len(skb);
        pskb_pull(skb, skb->mac_len + x->props.header_len);
index ed06b11..2435f7a 100644 (file)
@@ -3438,6 +3438,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                } else if (event == NETDEV_CHANGE) {
                        if (!addrconf_link_ready(dev)) {
                                /* device is still not ready. */
+                               rt6_sync_down_dev(dev, event);
                                break;
                        }
 
@@ -3449,6 +3450,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                                         * multicast snooping switches
                                         */
                                        ipv6_mc_up(idev);
+                                       rt6_sync_up(dev, RTNH_F_LINKDOWN);
                                        break;
                                }
                                idev->if_flags |= IF_READY;
@@ -3484,6 +3486,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                        if (run_pending)
                                addrconf_dad_run(idev);
 
+                       /* Device has an address by now */
+                       rt6_sync_up(dev, RTNH_F_DEAD);
+
                        /*
                         * If the MTU changed during the interface down,
                         * when the interface up, the changed MTU must be
@@ -3577,6 +3582,7 @@ static bool addr_is_local(const struct in6_addr *addr)
 
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
+       unsigned long event = how ? NETDEV_UNREGISTER : NETDEV_DOWN;
        struct net *net = dev_net(dev);
        struct inet6_dev *idev;
        struct inet6_ifaddr *ifa, *tmp;
@@ -3586,8 +3592,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
        ASSERT_RTNL();
 
-       rt6_ifdown(net, dev);
-       neigh_ifdown(&nd_tbl, dev);
+       rt6_disable_ip(dev, event);
 
        idev = __in6_dev_get(dev);
        if (!idev)
index c26f712..c9441ca 100644 (file)
@@ -210,7 +210,6 @@ lookup_protocol:
        np->mcast_hops  = IPV6_DEFAULT_MCASTHOPS;
        np->mc_loop     = 1;
        np->pmtudisc    = IPV6_PMTUDISC_WANT;
-       np->autoflowlabel = ip6_default_np_autolabel(net);
        np->repflow     = net->ipv6.sysctl.flowlabel_reflect;
        sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
 
index a902ff8..7c888c6 100644 (file)
@@ -141,14 +141,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
        struct sk_buff *skb = base->data;
+       struct xfrm_offload *xo = xfrm_offload(skb);
        void *tmp;
-       struct dst_entry *dst = skb_dst(skb);
-       struct xfrm_state *x = dst->xfrm;
+       struct xfrm_state *x;
+
+       if (xo && (xo->flags & XFRM_DEV_RESUME))
+               x = skb->sp->xvec[skb->sp->len - 1];
+       else
+               x = skb_dst(skb)->xfrm;
 
        tmp = ESP_SKB_CB(skb)->tmp;
        esp_ssg_unref(x, tmp);
        kfree(tmp);
-       xfrm_output_resume(skb, err);
+
+       if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+               if (err) {
+                       XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                       kfree_skb(skb);
+                       return;
+               }
+
+               skb_push(skb, skb->data - skb_mac_header(skb));
+               secpath_reset(skb);
+               xfrm_dev_resume(skb);
+       } else {
+               xfrm_output_resume(skb, err);
+       }
 }
 
 /* Move ESP header back into place. */
@@ -734,17 +752,13 @@ static int esp_init_aead(struct xfrm_state *x)
        char aead_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_aead *aead;
        int err;
-       u32 mask = 0;
 
        err = -ENAMETOOLONG;
        if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
                     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
                goto error;
 
-       if (x->xso.offload_handle)
-               mask |= CRYPTO_ALG_ASYNC;
-
-       aead = crypto_alloc_aead(aead_name, 0, mask);
+       aead = crypto_alloc_aead(aead_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;
@@ -774,7 +788,6 @@ static int esp_init_authenc(struct xfrm_state *x)
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;
-       u32 mask = 0;
 
        err = -EINVAL;
        if (!x->ealg)
@@ -800,10 +813,7 @@ static int esp_init_authenc(struct xfrm_state *x)
                        goto error;
        }
 
-       if (x->xso.offload_handle)
-               mask |= CRYPTO_ALG_ASYNC;
-
-       aead = crypto_alloc_aead(authenc_name, 0, mask);
+       aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;
index 333a478..0bb7d54 100644 (file)
@@ -135,75 +135,36 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
 {
-       __u32 seq;
-       int err = 0;
-       struct sk_buff *skb2;
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
-       struct sk_buff *segs = ERR_PTR(-EINVAL);
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (!xo)
-               goto out;
-
-       seq = xo->seq.low;
+               return ERR_PTR(-EINVAL);
 
        x = skb->sp->xvec[skb->sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);
 
        if (esph->spi != x->id.spi)
-               goto out;
+               return ERR_PTR(-EINVAL);
 
        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-               goto out;
+               return ERR_PTR(-EINVAL);
 
        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
        skb->encap_hdr_csum = 1;
 
-       if (!(features & NETIF_F_HW_ESP))
+       if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+           (x->xso.dev != skb->dev))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-       segs = x->outer_mode->gso_segment(x, skb, esp_features);
-       if (IS_ERR_OR_NULL(segs))
-               goto out;
-
-       __skb_pull(skb, skb->data - skb_mac_header(skb));
-
-       skb2 = segs;
-       do {
-               struct sk_buff *nskb = skb2->next;
-
-               xo = xfrm_offload(skb2);
-               xo->flags |= XFRM_GSO_SEGMENT;
-               xo->seq.low = seq;
-               xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-               if(!(features & NETIF_F_HW_ESP))
-                       xo->flags |= CRYPTO_FALLBACK;
-
-               x->outer_mode->xmit(x, skb2);
-
-               err = x->type_offload->xmit(x, skb2, esp_features);
-               if (err) {
-                       kfree_skb_list(segs);
-                       return ERR_PTR(err);
-               }
-
-               if (!skb_is_gso(skb2))
-                       seq++;
-               else
-                       seq += skb_shinfo(skb2)->gso_segs;
-
-               skb_push(skb2, skb2->mac_len);
-               skb2 = nskb;
-       } while (skb2);
+       xo->flags |= XFRM_GSO_SEGMENT;
 
-out:
-       return segs;
+       return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -222,6 +183,7 @@ static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
 
 static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_t features)
 {
+       int len;
        int err;
        int alen;
        int blksize;
@@ -230,6 +192,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
+       __u32 seq;
 
        esp.inplace = true;
 
@@ -265,28 +228,33 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
                        return esp.nfrags;
        }
 
+       seq = xo->seq.low;
+
        esph = ip_esp_hdr(skb);
        esph->spi = x->id.spi;
 
        skb_push(skb, -skb_network_offset(skb));
 
        if (xo->flags & XFRM_GSO_SEGMENT) {
-               esph->seq_no = htonl(xo->seq.low);
-       } else {
-               int len;
-
-               len = skb->len - sizeof(struct ipv6hdr);
-               if (len > IPV6_MAXPLEN)
-                       len = 0;
+               esph->seq_no = htonl(seq);
 
-               ipv6_hdr(skb)->payload_len = htons(len);
+               if (!skb_is_gso(skb))
+                       xo->seq.low++;
+               else
+                       xo->seq.low += skb_shinfo(skb)->gso_segs;
        }
 
+       esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+       len = skb->len - sizeof(struct ipv6hdr);
+       if (len > IPV6_MAXPLEN)
+               len = 0;
+
+       ipv6_hdr(skb)->payload_len = htons(len);
+
        if (hw_offload)
                return 0;
 
-       esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
        err = esp6_output_tail(x, skb, &esp);
        if (err)
                return err;
index a64d559..edda5ad 100644 (file)
@@ -107,16 +107,13 @@ enum {
 
 void fib6_update_sernum(struct rt6_info *rt)
 {
-       struct fib6_table *table = rt->rt6i_table;
        struct net *net = dev_net(rt->dst.dev);
        struct fib6_node *fn;
 
-       spin_lock_bh(&table->tb6_lock);
        fn = rcu_dereference_protected(rt->rt6i_node,
-                       lockdep_is_held(&table->tb6_lock));
+                       lockdep_is_held(&rt->rt6i_table->tb6_lock));
        if (fn)
                fn->fn_sernum = fib6_new_sernum(net);
-       spin_unlock_bh(&table->tb6_lock);
 }
 
 /*
@@ -1102,8 +1099,8 @@ void fib6_force_start_gc(struct net *net)
                          jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }
 
-static void fib6_update_sernum_upto_root(struct rt6_info *rt,
-                                        int sernum)
+static void __fib6_update_sernum_upto_root(struct rt6_info *rt,
+                                          int sernum)
 {
        struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
                                lockdep_is_held(&rt->rt6i_table->tb6_lock));
@@ -1117,6 +1114,11 @@ static void fib6_update_sernum_upto_root(struct rt6_info *rt,
        }
 }
 
+void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt)
+{
+       __fib6_update_sernum_upto_root(rt, fib6_new_sernum(net));
+}
+
 /*
  *     Add routing information to the routing tree.
  *     <destination addr>/<source addr>
@@ -1230,7 +1232,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
 
        err = fib6_add_rt2node(fn, rt, info, mxc, extack);
        if (!err) {
-               fib6_update_sernum_upto_root(rt, sernum);
+               __fib6_update_sernum_upto_root(rt, sernum);
                fib6_start_gc(info->nl_net, rt);
        }
 
@@ -1887,7 +1889,7 @@ static int fib6_clean_node(struct fib6_walker *w)
 
        for_each_fib6_walker_rt(w) {
                res = c->func(rt, c->arg);
-               if (res < 0) {
+               if (res == -1) {
                        w->leaf = rt;
                        res = fib6_del(rt, &info);
                        if (res) {
@@ -1900,6 +1902,12 @@ static int fib6_clean_node(struct fib6_walker *w)
                                continue;
                        }
                        return 0;
+               } else if (res == -2) {
+                       if (WARN_ON(!rt->rt6i_nsiblings))
+                               continue;
+                       rt = list_last_entry(&rt->rt6i_siblings,
+                                            struct rt6_info, rt6i_siblings);
+                       continue;
                }
                WARN_ON(res != 0);
        }
@@ -1911,7 +1919,8 @@ static int fib6_clean_node(struct fib6_walker *w)
  *     Convenient frontend to tree walker.
  *
  *     func is called on each route.
- *             It may return -1 -> delete this route.
+ *             It may return -2 -> skip multipath route.
+ *                           -1 -> delete this route.
  *                           0  -> continue walking
  */
 
@@ -2103,7 +2112,6 @@ static void fib6_net_exit(struct net *net)
 {
        unsigned int i;
 
-       rt6_ifdown(net, NULL);
        del_timer_sync(&net->ipv6.ip6_fib_timer);
 
        for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
index 87b9892..db99446 100644 (file)
@@ -507,12 +507,11 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
        struct ip6_tnl *tunnel;
        u8 ver;
 
-       ipv6h = ipv6_hdr(skb);
-       ershdr = (struct erspan_base_hdr *)skb->data;
-
        if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
                return PACKET_REJECT;
 
+       ipv6h = ipv6_hdr(skb);
+       ershdr = (struct erspan_base_hdr *)skb->data;
        ver = (ntohs(ershdr->ver_vlan) & VER_MASK) >> VER_OFFSET;
        tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK);
 
@@ -551,8 +550,6 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
 
                        info = &tun_dst->u.tun_info;
                        md = ip_tunnel_info_opts(info);
-                       if (!md)
-                               return PACKET_REJECT;
 
                        memcpy(md, pkt_md, sizeof(*md));
                        md->version = ver;
@@ -603,12 +600,13 @@ static int gre_rcv(struct sk_buff *skb)
                     tpi.proto == htons(ETH_P_ERSPAN2))) {
                if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
                        return 0;
-               goto drop;
+               goto out;
        }
 
        if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
                return 0;
 
+out:
        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 drop:
        kfree_skb(skb);
@@ -1335,6 +1333,36 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
        eth_random_addr(dev->perm_addr);
 }
 
+#define GRE6_FEATURES (NETIF_F_SG |            \
+                      NETIF_F_FRAGLIST |       \
+                      NETIF_F_HIGHDMA |        \
+                      NETIF_F_HW_CSUM)
+
+static void ip6gre_tnl_init_features(struct net_device *dev)
+{
+       struct ip6_tnl *nt = netdev_priv(dev);
+
+       dev->features           |= GRE6_FEATURES;
+       dev->hw_features        |= GRE6_FEATURES;
+
+       if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
+               /* TCP offload with GRE SEQ is not supported, nor
+                * can we support 2 levels of outer headers requiring
+                * an update.
+                */
+               if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
+                   nt->encap.type == TUNNEL_ENCAP_NONE) {
+                       dev->features    |= NETIF_F_GSO_SOFTWARE;
+                       dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+               }
+
+               /* Can use a lockless transmit, unless we generate
+                * output sequences
+                */
+               dev->features |= NETIF_F_LLTX;
+       }
+}
+
 static int ip6gre_tunnel_init_common(struct net_device *dev)
 {
        struct ip6_tnl *tunnel;
@@ -1373,6 +1401,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
                dev->features |= NETIF_F_NETNS_LOCAL;
                netif_keep_dst(dev);
        }
+       ip6gre_tnl_init_features(dev);
 
        return 0;
 }
@@ -1707,11 +1736,6 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
        .ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
-#define GRE6_FEATURES (NETIF_F_SG |            \
-                      NETIF_F_FRAGLIST |       \
-                      NETIF_F_HIGHDMA |                \
-                      NETIF_F_HW_CSUM)
-
 static int ip6erspan_tap_init(struct net_device *dev)
 {
        struct ip6_tnl *tunnel;
@@ -1770,6 +1794,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
 
        ether_setup(dev);
 
+       dev->max_mtu = 0;
        dev->netdev_ops = &ip6gre_tap_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
@@ -1849,26 +1874,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
        nt->net = dev_net(dev);
        ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
 
-       dev->features           |= GRE6_FEATURES;
-       dev->hw_features        |= GRE6_FEATURES;
-
-       if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
-               /* TCP offload with GRE SEQ is not supported, nor
-                * can we support 2 levels of outer headers requiring
-                * an update.
-                */
-               if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
-                   (nt->encap.type == TUNNEL_ENCAP_NONE)) {
-                       dev->features    |= NETIF_F_GSO_SOFTWARE;
-                       dev->hw_features |= NETIF_F_GSO_SOFTWARE;
-               }
-
-               /* Can use a lockless transmit, unless we generate
-                * output sequences
-                */
-               dev->features |= NETIF_F_LLTX;
-       }
-
        err = register_netdevice(dev);
        if (err)
                goto out;
index 176d74f..bcdb615 100644 (file)
@@ -138,6 +138,14 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
                return ret;
        }
 
+#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
+       /* Policy lookup after SNAT yielded a new policy */
+       if (skb_dst(skb)->xfrm) {
+               IPCB(skb)->flags |= IPSKB_REROUTED;
+               return dst_output(net, sk, skb);
+       }
+#endif
+
        if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
            dst_allfrag(skb_dst(skb)) ||
            (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
@@ -166,6 +174,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
+static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+{
+       if (!np->autoflowlabel_set)
+               return ip6_default_np_autolabel(net);
+       else
+               return np->autoflowlabel;
+}
+
 /*
  * xmit an sk_buff (used by TCP, SCTP and DCCP)
  * Note : socket lock is not held for SYNACK packets, but might be modified
@@ -230,7 +246,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
                hlimit = ip6_dst_hoplimit(dst);
 
        ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                                    np->autoflowlabel, fl6));
+                               ip6_autoflowlabel(net, np), fl6));
 
        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
@@ -1626,7 +1642,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
 
        ip6_flow_hdr(hdr, v6_cork->tclass,
                     ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                       np->autoflowlabel, fl6));
+                                       ip6_autoflowlabel(net, np), fl6));
        hdr->hop_limit = v6_cork->hop_limit;
        hdr->nexthdr = proto;
        hdr->saddr = fl6->saddr;
index 6ff2f21..8a4610e 100644 (file)
@@ -1126,8 +1126,13 @@ route_lookup:
                max_headroom += 8;
                mtu -= 8;
        }
-       if (mtu < IPV6_MIN_MTU)
-               mtu = IPV6_MIN_MTU;
+       if (skb->protocol == htons(ETH_P_IPV6)) {
+               if (mtu < IPV6_MIN_MTU)
+                       mtu = IPV6_MIN_MTU;
+       } else if (mtu < 576) {
+               mtu = 576;
+       }
+
        if (skb_dst(skb) && !t->parms.collect_md)
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
        if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
index dbb74f3..18caa95 100644 (file)
@@ -626,6 +626,7 @@ static void vti6_link_config(struct ip6_tnl *t)
 {
        struct net_device *dev = t->dev;
        struct __ip6_tnl_parm *p = &t->parms;
+       struct net_device *tdev = NULL;
 
        memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
        memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
@@ -638,6 +639,25 @@ static void vti6_link_config(struct ip6_tnl *t)
                dev->flags |= IFF_POINTOPOINT;
        else
                dev->flags &= ~IFF_POINTOPOINT;
+
+       if (p->flags & IP6_TNL_F_CAP_XMIT) {
+               int strict = (ipv6_addr_type(&p->raddr) &
+                             (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
+               struct rt6_info *rt = rt6_lookup(t->net,
+                                                &p->raddr, &p->laddr,
+                                                p->link, strict);
+
+               if (rt)
+                       tdev = rt->dst.dev;
+               ip6_rt_put(rt);
+       }
+
+       if (!tdev && p->link)
+               tdev = __dev_get_by_index(t->net, p->link);
+
+       if (tdev)
+               dev->mtu = max_t(int, tdev->mtu - dev->hard_header_len,
+                                IPV6_MIN_MTU);
 }
 
 /**
index b9404fe..2d4680e 100644 (file)
@@ -886,6 +886,7 @@ pref_skip_coa:
                break;
        case IPV6_AUTOFLOWLABEL:
                np->autoflowlabel = valbool;
+               np->autoflowlabel_set = 1;
                retv = 0;
                break;
        case IPV6_RECVFRAGSIZE:
index b3f4d19..1054b05 100644 (file)
@@ -474,7 +474,9 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
                        if (route_choosen == 0) {
                                struct inet6_dev *idev = sibling->rt6i_idev;
 
-                               if (!netif_carrier_ok(sibling->dst.dev) &&
+                               if (sibling->rt6i_nh_flags & RTNH_F_DEAD)
+                                       break;
+                               if (sibling->rt6i_nh_flags & RTNH_F_LINKDOWN &&
                                    idev->cnf.ignore_routes_with_linkdown)
                                        break;
                                if (rt6_score_route(sibling, oif, strict) < 0)
@@ -499,12 +501,15 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
        struct rt6_info *local = NULL;
        struct rt6_info *sprt;
 
-       if (!oif && ipv6_addr_any(saddr))
-               goto out;
+       if (!oif && ipv6_addr_any(saddr) && !(rt->rt6i_nh_flags & RTNH_F_DEAD))
+               return rt;
 
        for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) {
                struct net_device *dev = sprt->dst.dev;
 
+               if (sprt->rt6i_nh_flags & RTNH_F_DEAD)
+                       continue;
+
                if (oif) {
                        if (dev->ifindex == oif)
                                return sprt;
@@ -533,8 +538,8 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
                if (flags & RT6_LOOKUP_F_IFACE)
                        return net->ipv6.ip6_null_entry;
        }
-out:
-       return rt;
+
+       return rt->rt6i_nh_flags & RTNH_F_DEAD ? net->ipv6.ip6_null_entry : rt;
 }
 
 #ifdef CONFIG_IPV6_ROUTER_PREF
@@ -679,10 +684,12 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
        int m;
        bool match_do_rr = false;
        struct inet6_dev *idev = rt->rt6i_idev;
-       struct net_device *dev = rt->dst.dev;
 
-       if (dev && !netif_carrier_ok(dev) &&
-           idev->cnf.ignore_routes_with_linkdown &&
+       if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+               goto out;
+
+       if (idev->cnf.ignore_routes_with_linkdown &&
+           rt->rt6i_nh_flags & RTNH_F_LINKDOWN &&
            !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
                goto out;
 
@@ -1346,7 +1353,9 @@ out:
 
        /* Update fn->fn_sernum to invalidate all cached dst */
        if (!err) {
+               spin_lock_bh(&ort->rt6i_table->tb6_lock);
                fib6_update_sernum(ort);
+               spin_unlock_bh(&ort->rt6i_table->tb6_lock);
                fib6_force_start_gc(net);
        }
 
@@ -2154,6 +2163,8 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 restart:
        for_each_fib6_node_rt_rcu(fn) {
+               if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+                       continue;
                if (rt6_check_expired(rt))
                        continue;
                if (rt->dst.error)
@@ -2336,6 +2347,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        }
 
        rt->dst.flags |= DST_HOST;
+       rt->dst.input = ip6_input;
        rt->dst.output  = ip6_output;
        rt->rt6i_gateway  = fl6->daddr;
        rt->rt6i_dst.addr = fl6->daddr;
@@ -2343,7 +2355,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        rt->rt6i_idev     = idev;
        dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
 
-       /* Add this dst into uncached_list so that rt6_ifdown() can
+       /* Add this dst into uncached_list so that rt6_disable_ip() can
         * do proper release of the net_device
         */
        rt6_uncached_list_add(rt);
@@ -2745,6 +2757,9 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
        rt->rt6i_flags = cfg->fc_flags;
 
 install_route:
+       if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
+           !netif_carrier_ok(dev))
+               rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
        rt->dst.dev = dev;
        rt->rt6i_idev = idev;
        rt->rt6i_table = table;
@@ -3458,37 +3473,149 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
        fib6_clean_all(net, fib6_clean_tohost, gateway);
 }
 
-struct arg_dev_net {
-       struct net_device *dev;
-       struct net *net;
+struct arg_netdev_event {
+       const struct net_device *dev;
+       union {
+               unsigned int nh_flags;
+               unsigned long event;
+       };
 };
 
+static int fib6_ifup(struct rt6_info *rt, void *p_arg)
+{
+       const struct arg_netdev_event *arg = p_arg;
+       const struct net *net = dev_net(arg->dev);
+
+       if (rt != net->ipv6.ip6_null_entry && rt->dst.dev == arg->dev) {
+               rt->rt6i_nh_flags &= ~arg->nh_flags;
+               fib6_update_sernum_upto_root(dev_net(rt->dst.dev), rt);
+       }
+
+       return 0;
+}
+
+void rt6_sync_up(struct net_device *dev, unsigned int nh_flags)
+{
+       struct arg_netdev_event arg = {
+               .dev = dev,
+               .nh_flags = nh_flags,
+       };
+
+       if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
+               arg.nh_flags |= RTNH_F_LINKDOWN;
+
+       fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
+}
+
+static bool rt6_multipath_uses_dev(const struct rt6_info *rt,
+                                  const struct net_device *dev)
+{
+       struct rt6_info *iter;
+
+       if (rt->dst.dev == dev)
+               return true;
+       list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+               if (iter->dst.dev == dev)
+                       return true;
+
+       return false;
+}
+
+static void rt6_multipath_flush(struct rt6_info *rt)
+{
+       struct rt6_info *iter;
+
+       rt->should_flush = 1;
+       list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+               iter->should_flush = 1;
+}
+
+static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt,
+                                            const struct net_device *down_dev)
+{
+       struct rt6_info *iter;
+       unsigned int dead = 0;
+
+       if (rt->dst.dev == down_dev || rt->rt6i_nh_flags & RTNH_F_DEAD)
+               dead++;
+       list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+               if (iter->dst.dev == down_dev ||
+                   iter->rt6i_nh_flags & RTNH_F_DEAD)
+                       dead++;
+
+       return dead;
+}
+
+static void rt6_multipath_nh_flags_set(struct rt6_info *rt,
+                                      const struct net_device *dev,
+                                      unsigned int nh_flags)
+{
+       struct rt6_info *iter;
+
+       if (rt->dst.dev == dev)
+               rt->rt6i_nh_flags |= nh_flags;
+       list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+               if (iter->dst.dev == dev)
+                       iter->rt6i_nh_flags |= nh_flags;
+}
+
 /* called with write lock held for table with rt */
-static int fib6_ifdown(struct rt6_info *rt, void *arg)
+static int fib6_ifdown(struct rt6_info *rt, void *p_arg)
 {
-       const struct arg_dev_net *adn = arg;
-       const struct net_device *dev = adn->dev;
+       const struct arg_netdev_event *arg = p_arg;
+       const struct net_device *dev = arg->dev;
+       const struct net *net = dev_net(dev);
 
-       if ((rt->dst.dev == dev || !dev) &&
-           rt != adn->net->ipv6.ip6_null_entry &&
-           (rt->rt6i_nsiblings == 0 ||
-            (dev && netdev_unregistering(dev)) ||
-            !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
-               return -1;
+       if (rt == net->ipv6.ip6_null_entry)
+               return 0;
+
+       switch (arg->event) {
+       case NETDEV_UNREGISTER:
+               return rt->dst.dev == dev ? -1 : 0;
+       case NETDEV_DOWN:
+               if (rt->should_flush)
+                       return -1;
+               if (!rt->rt6i_nsiblings)
+                       return rt->dst.dev == dev ? -1 : 0;
+               if (rt6_multipath_uses_dev(rt, dev)) {
+                       unsigned int count;
+
+                       count = rt6_multipath_dead_count(rt, dev);
+                       if (rt->rt6i_nsiblings + 1 == count) {
+                               rt6_multipath_flush(rt);
+                               return -1;
+                       }
+                       rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
+                                                  RTNH_F_LINKDOWN);
+                       fib6_update_sernum(rt);
+               }
+               return -2;
+       case NETDEV_CHANGE:
+               if (rt->dst.dev != dev ||
+                   rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
+                       break;
+               rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
+               break;
+       }
 
        return 0;
 }
 
-void rt6_ifdown(struct net *net, struct net_device *dev)
+void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
 {
-       struct arg_dev_net adn = {
+       struct arg_netdev_event arg = {
                .dev = dev,
-               .net = net,
+               .event = event,
        };
 
-       fib6_clean_all(net, fib6_ifdown, &adn);
-       if (dev)
-               rt6_uncached_list_flush_dev(net, dev);
+       fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
+}
+
+void rt6_disable_ip(struct net_device *dev, unsigned long event)
+{
+       rt6_sync_down_dev(dev, event);
+       rt6_uncached_list_flush_dev(dev_net(dev), dev);
+       neigh_ifdown(&nd_tbl, dev);
 }
 
 struct rt6_mtu_change_arg {
@@ -3991,7 +4118,10 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
                            unsigned int *flags, bool skip_oif)
 {
-       if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
+       if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+               *flags |= RTNH_F_DEAD;
+
+       if (rt->rt6i_nh_flags & RTNH_F_LINKDOWN) {
                *flags |= RTNH_F_LINKDOWN;
                if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
                        *flags |= RTNH_F_DEAD;
@@ -4297,19 +4427,13 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                if (!ipv6_addr_any(&fl6.saddr))
                        flags |= RT6_LOOKUP_F_HAS_SADDR;
 
-               if (!fibmatch)
-                       dst = ip6_route_input_lookup(net, dev, &fl6, flags);
-               else
-                       dst = ip6_route_lookup(net, &fl6, 0);
+               dst = ip6_route_input_lookup(net, dev, &fl6, flags);
 
                rcu_read_unlock();
        } else {
                fl6.flowi6_oif = oif;
 
-               if (!fibmatch)
-                       dst = ip6_route_output(net, NULL, &fl6);
-               else
-                       dst = ip6_route_lookup(net, &fl6, 0);
+               dst = ip6_route_output(net, NULL, &fl6);
        }
 
 
@@ -4326,6 +4450,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                goto errout;
        }
 
+       if (fibmatch && rt->from) {
+               struct rt6_info *ort = rt->from;
+
+               dst_hold(&ort->dst);
+               ip6_rt_put(rt);
+               rt = ort;
+       }
+
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb) {
                ip6_rt_put(rt);
index 7178476..aa12a26 100644 (file)
@@ -1795,7 +1795,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                timer_expires = jiffies;
        }
 
-       state = sk_state_load(sp);
+       state = inet_sk_state_load(sp);
        if (state == TCP_LISTEN)
                rx_queue = sp->sk_ack_backlog;
        else
index fe04e23..841f4a0 100644 (file)
@@ -32,6 +32,14 @@ int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
 }
 EXPORT_SYMBOL(xfrm6_rcv_spi);
 
+static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
+                                  struct sk_buff *skb)
+{
+       if (xfrm_trans_queue(skb, ip6_rcv_finish))
+               __kfree_skb(skb);
+       return -1;
+}
+
 int xfrm6_transport_finish(struct sk_buff *skb, int async)
 {
        struct xfrm_offload *xo = xfrm_offload(skb);
@@ -56,7 +64,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 
        NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
                dev_net(skb->dev), NULL, skb, skb->dev, NULL,
-               ip6_rcv_finish);
+               xfrm6_transport_finish2);
        return -1;
 }
 
index e66b94f..4e12859 100644 (file)
@@ -105,17 +105,14 @@ static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
        __skb_push(skb, skb->mac_len);
        return skb_mac_gso_segment(skb, features);
-
 }
 
 static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
        struct xfrm_offload *xo = xfrm_offload(skb);
 
-       if (xo->flags & XFRM_GSO_SEGMENT) {
-               skb->network_header = skb->network_header - x->props.header_len;
+       if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
-       }
 
        skb_reset_mac_len(skb);
        pskb_pull(skb, skb->mac_len + x->props.header_len);
index 115918a..786cd7f 100644 (file)
@@ -780,10 +780,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                }
        }
 
-       /* Session data offset is handled differently for L2TPv2 and
-        * L2TPv3. For L2TPv2, there is an optional 16-bit value in
-        * the header. For L2TPv3, the offset is negotiated using AVPs
-        * in the session setup control protocol.
+       /* Session data offset is defined only for L2TPv2 and is
+        * indicated by an optional 16-bit value in the header.
         */
        if (tunnel->version == L2TP_HDR_VER_2) {
                /* If offset bit set, skip it. */
@@ -791,8 +789,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                        offset = ntohs(*(__be16 *)ptr);
                        ptr += 2 + offset;
                }
-       } else
-               ptr += session->offset;
+       }
 
        offset = ptr - optr;
        if (!pskb_may_pull(skb, offset))
@@ -1068,8 +1065,6 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
                }
                bufp += session->l2specific_len;
        }
-       if (session->offset)
-               bufp += session->offset;
 
        return bufp - optr;
 }
@@ -1734,7 +1729,7 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
                if (session->send_seq)
                        session->hdr_len += 4;
        } else {
-               session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
+               session->hdr_len = 4 + session->cookie_len + session->l2specific_len;
                if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
                        session->hdr_len += 4;
        }
@@ -1784,7 +1779,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
                        session->recv_seq = cfg->recv_seq;
                        session->lns_mode = cfg->lns_mode;
                        session->reorder_timeout = cfg->reorder_timeout;
-                       session->offset = cfg->offset;
                        session->l2specific_type = cfg->l2specific_type;
                        session->l2specific_len = cfg->l2specific_len;
                        session->cookie_len = cfg->cookie_len;
index 9534e16..c2e9bbd 100644 (file)
@@ -59,7 +59,6 @@ struct l2tp_session_cfg {
        int                     debug;          /* bitmask of debug message
                                                 * categories */
        u16                     vlan_id;        /* VLAN pseudowire only */
-       u16                     offset;         /* offset to payload */
        u16                     l2specific_len; /* Layer 2 specific length */
        u16                     l2specific_type; /* Layer 2 specific type */
        u8                      cookie[8];      /* optional cookie */
@@ -86,8 +85,6 @@ struct l2tp_session {
        int                     cookie_len;
        u8                      peer_cookie[8];
        int                     peer_cookie_len;
-       u16                     offset;         /* offset from end of L2TP header
-                                                  to beginning of data */
        u16                     l2specific_len;
        u16                     l2specific_type;
        u16                     hdr_len;
index eb69411..2c30587 100644 (file)
@@ -180,8 +180,8 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
                   session->lns_mode ? "LNS" : "LAC",
                   session->debug,
                   jiffies_to_msecs(session->reorder_timeout));
-       seq_printf(m, "   offset %hu l2specific %hu/%hu\n",
-                  session->offset, session->l2specific_type, session->l2specific_len);
+       seq_printf(m, "   offset 0 l2specific %hu/%hu\n",
+                  session->l2specific_type, session->l2specific_len);
        if (session->cookie_len) {
                seq_printf(m, "   cookie %02x%02x%02x%02x",
                           session->cookie[0], session->cookie[1],
index a1f24fb..e1ca29f 100644 (file)
@@ -547,9 +547,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
        }
 
        if (tunnel->version > 2) {
-               if (info->attrs[L2TP_ATTR_OFFSET])
-                       cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);
-
                if (info->attrs[L2TP_ATTR_DATA_SEQ])
                        cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
 
index d444752..a8b1616 100644 (file)
@@ -153,27 +153,16 @@ EXPORT_SYMBOL(ieee80211_stop_rx_ba_session);
  */
 static void sta_rx_agg_session_timer_expired(struct timer_list *t)
 {
-       struct tid_ampdu_rx *tid_rx_timer =
-               from_timer(tid_rx_timer, t, session_timer);
-       struct sta_info *sta = tid_rx_timer->sta;
-       u8 tid = tid_rx_timer->tid;
-       struct tid_ampdu_rx *tid_rx;
+       struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, session_timer);
+       struct sta_info *sta = tid_rx->sta;
+       u8 tid = tid_rx->tid;
        unsigned long timeout;
 
-       rcu_read_lock();
-       tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
-       if (!tid_rx) {
-               rcu_read_unlock();
-               return;
-       }
-
        timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&tid_rx->session_timer, timeout);
-               rcu_read_unlock();
                return;
        }
-       rcu_read_unlock();
 
        ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n",
               sta->sta.addr, tid);
@@ -415,10 +404,11 @@ end:
                                          timeout);
 }
 
-void __ieee80211_start_rx_ba_session(struct sta_info *sta,
-                                    u8 dialog_token, u16 timeout,
-                                    u16 start_seq_num, u16 ba_policy, u16 tid,
-                                    u16 buf_size, bool tx, bool auto_seq)
+static void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+                                           u8 dialog_token, u16 timeout,
+                                           u16 start_seq_num, u16 ba_policy,
+                                           u16 tid, u16 buf_size, bool tx,
+                                           bool auto_seq)
 {
        mutex_lock(&sta->ampdu_mlme.mtx);
        ___ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
index 5f8ab5b..595c662 100644 (file)
@@ -392,7 +392,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
         * telling the driver. New packets will not go through since
         * the aggregation session is no longer OPERATIONAL.
         */
-       synchronize_net();
+       if (!local->in_reconfig)
+               synchronize_net();
 
        tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
                                        WLAN_BACK_RECIPIENT :
@@ -429,18 +430,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
  */
 static void sta_addba_resp_timer_expired(struct timer_list *t)
 {
-       struct tid_ampdu_tx *tid_tx_timer =
-               from_timer(tid_tx_timer, t, addba_resp_timer);
-       struct sta_info *sta = tid_tx_timer->sta;
-       u8 tid = tid_tx_timer->tid;
-       struct tid_ampdu_tx *tid_tx;
+       struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer);
+       struct sta_info *sta = tid_tx->sta;
+       u8 tid = tid_tx->tid;
 
        /* check if the TID waits for addBA response */
-       rcu_read_lock();
-       tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
-       if (!tid_tx ||
-           test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
-               rcu_read_unlock();
+       if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
                ht_dbg(sta->sdata,
                       "timer expired on %pM tid %d not expecting addBA response\n",
                       sta->sta.addr, tid);
@@ -451,7 +446,6 @@ static void sta_addba_resp_timer_expired(struct timer_list *t)
               sta->sta.addr, tid);
 
        ieee80211_stop_tx_ba_session(&sta->sta, tid);
-       rcu_read_unlock();
 }
 
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
@@ -529,29 +523,21 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
  */
 static void sta_tx_agg_session_timer_expired(struct timer_list *t)
 {
-       struct tid_ampdu_tx *tid_tx_timer =
-               from_timer(tid_tx_timer, t, session_timer);
-       struct sta_info *sta = tid_tx_timer->sta;
-       u8 tid = tid_tx_timer->tid;
-       struct tid_ampdu_tx *tid_tx;
+       struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer);
+       struct sta_info *sta = tid_tx->sta;
+       u8 tid = tid_tx->tid;
        unsigned long timeout;
 
-       rcu_read_lock();
-       tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
-       if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-               rcu_read_unlock();
+       if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
                return;
        }
 
        timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
        if (time_is_after_jiffies(timeout)) {
                mod_timer(&tid_tx->session_timer, timeout);
-               rcu_read_unlock();
                return;
        }
 
-       rcu_read_unlock();
-
        ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
               sta->sta.addr, tid);
 
index fb15d3b..46028e1 100644 (file)
@@ -573,10 +573,12 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
        case WLAN_CIPHER_SUITE_BIP_CMAC_256:
                BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
                             offsetof(typeof(kseq), aes_cmac));
+               /* fall through */
        case WLAN_CIPHER_SUITE_BIP_GMAC_128:
        case WLAN_CIPHER_SUITE_BIP_GMAC_256:
                BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
                             offsetof(typeof(kseq), aes_gmac));
+               /* fall through */
        case WLAN_CIPHER_SUITE_GCMP:
        case WLAN_CIPHER_SUITE_GCMP_256:
                BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
@@ -2205,6 +2207,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
                 * for now fall through to allow scanning only when
                 * beaconing hasn't been configured yet
                 */
+               /* fall through */
        case NL80211_IFTYPE_AP:
                /*
                 * If the scan has been forced (and the driver supports
@@ -2373,10 +2376,17 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
        struct ieee80211_sub_if_data *sdata;
        enum nl80211_tx_power_setting txp_type = type;
        bool update_txp_type = false;
+       bool has_monitor = false;
 
        if (wdev) {
                sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
+               if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+                       sdata = rtnl_dereference(local->monitor_sdata);
+                       if (!sdata)
+                               return -EOPNOTSUPP;
+               }
+
                switch (type) {
                case NL80211_TX_POWER_AUTOMATIC:
                        sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
@@ -2415,15 +2425,34 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
 
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(sdata, &local->interfaces, list) {
+               if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+                       has_monitor = true;
+                       continue;
+               }
                sdata->user_power_level = local->user_power_level;
                if (txp_type != sdata->vif.bss_conf.txpower_type)
                        update_txp_type = true;
                sdata->vif.bss_conf.txpower_type = txp_type;
        }
-       list_for_each_entry(sdata, &local->interfaces, list)
+       list_for_each_entry(sdata, &local->interfaces, list) {
+               if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+                       continue;
                ieee80211_recalc_txpower(sdata, update_txp_type);
+       }
        mutex_unlock(&local->iflist_mtx);
 
+       if (has_monitor) {
+               sdata = rtnl_dereference(local->monitor_sdata);
+               if (sdata) {
+                       sdata->user_power_level = local->user_power_level;
+                       if (txp_type != sdata->vif.bss_conf.txpower_type)
+                               update_txp_type = true;
+                       sdata->vif.bss_conf.txpower_type = txp_type;
+
+                       ieee80211_recalc_txpower(sdata, update_txp_type);
+               }
+       }
+
        return 0;
 }
 
index 5fae001..1f466d1 100644 (file)
@@ -211,6 +211,7 @@ static const char *hw_flag_names[] = {
        FLAG(TX_FRAG_LIST),
        FLAG(REPORTS_LOW_ACK),
        FLAG(SUPPORTS_TX_FRAG),
+       FLAG(SUPPORTS_TDLS_BUFFER_STA),
 #undef FLAG
 };
 
index c7f93fd..4d82fe7 100644 (file)
@@ -165,7 +165,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
        if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
                         sdata->vif.type == NL80211_IFTYPE_NAN ||
                         (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
-                         !sdata->vif.mu_mimo_owner)))
+                         !sdata->vif.mu_mimo_owner &&
+                         !(changed & BSS_CHANGED_TXPOWER))))
                return;
 
        if (!check_sdata_in_driver(sdata))
index 1621b6a..d752353 100644 (file)
@@ -492,6 +492,7 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
        case IEEE80211_SMPS_AUTOMATIC:
        case IEEE80211_SMPS_NUM_MODES:
                WARN_ON(1);
+               /* fall through */
        case IEEE80211_SMPS_OFF:
                action_frame->u.action.u.ht_smps.smps_control =
                                WLAN_HT_SMPS_CONTROL_DISABLED;
index 885d00b..2690002 100644 (file)
@@ -1757,10 +1757,6 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
                                     u16 initiator, u16 reason, bool stop);
 void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
                                    u16 initiator, u16 reason, bool stop);
-void __ieee80211_start_rx_ba_session(struct sta_info *sta,
-                                    u8 dialog_token, u16 timeout,
-                                    u16 start_seq_num, u16 ba_policy, u16 tid,
-                                    u16 buf_size, bool tx, bool auto_seq);
 void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
                                      u8 dialog_token, u16 timeout,
                                      u16 start_seq_num, u16 ba_policy, u16 tid,
index 13b16f9..5fe01f8 100644 (file)
@@ -1474,7 +1474,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
                break;
        case NL80211_IFTYPE_UNSPECIFIED:
        case NUM_NL80211_IFTYPES:
-               BUG();
+               WARN_ON(1);
                break;
        }
 
@@ -1633,7 +1633,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
                                goto out_unlock;
                        }
                }
-               /* otherwise fall through */
+               /* fall through */
        default:
                /* assign a new address if possible -- try n_addresses first */
                for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
index 9380493..aee05ec 100644 (file)
@@ -178,13 +178,17 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
        if (!ret) {
                key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
 
-               if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+               if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+                                          IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
                      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
                        decrease_tailroom_need_count(sdata, 1);
 
                WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
                        (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
 
+               WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) &&
+                       (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC));
+
                return 0;
        }
 
@@ -237,7 +241,8 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
        sta = key->sta;
        sdata = key->sdata;
 
-       if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+       if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+                                  IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
              (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
                increment_tailroom_need_count(sdata);
 
@@ -1104,7 +1109,8 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
        if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
                key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
 
-               if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+               if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+                                          IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
                      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
                        increment_tailroom_need_count(key->sdata);
        }
index e054a2f..0785d04 100644 (file)
@@ -263,6 +263,9 @@ static void ieee80211_restart_work(struct work_struct *work)
        flush_delayed_work(&local->roc_work);
        flush_work(&local->hw_roc_done);
 
+       /* wait for all packet processing to be done */
+       synchronize_net();
+
        ieee80211_reconfig(local);
        rtnl_unlock();
 }
index 5e27364..73ac607 100644 (file)
@@ -989,8 +989,10 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
        switch (sdata->vif.bss_conf.chandef.width) {
        case NL80211_CHAN_WIDTH_20_NOHT:
                sta_flags |= IEEE80211_STA_DISABLE_HT;
+               /* fall through */
        case NL80211_CHAN_WIDTH_20:
                sta_flags |= IEEE80211_STA_DISABLE_40MHZ;
+               /* fall through */
        case NL80211_CHAN_WIDTH_40:
                sta_flags |= IEEE80211_STA_DISABLE_VHT;
                break;
index 4394463..35ad398 100644 (file)
@@ -1250,6 +1250,7 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
                break;
        case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
                flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
+               /* fall through */
        case IEEE80211_PROACTIVE_PREQ_NO_PREP:
                interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
                target_flags |= IEEE80211_PREQ_TO_FLAG |
index e2d00cc..0f6c9ca 100644 (file)
@@ -672,7 +672,7 @@ void mesh_plink_timer(struct timer_list *t)
                        break;
                }
                reason = WLAN_REASON_MESH_MAX_RETRIES;
-               /* fall through on else */
+               /* fall through */
        case NL80211_PLINK_CNF_RCVD:
                /* confirm timer */
                if (!reason)
index c244691..39b660b 100644 (file)
@@ -473,6 +473,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
        case IEEE80211_SMPS_AUTOMATIC:
        case IEEE80211_SMPS_NUM_MODES:
                WARN_ON(1);
+               /* fall through */
        case IEEE80211_SMPS_OFF:
                cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
                        IEEE80211_HT_CAP_SM_PS_SHIFT;
@@ -2861,10 +2862,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
        aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
        capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
 
-       if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
-               sdata_info(sdata, "invalid AID value 0x%x; bits 15:14 not set\n",
-                          aid);
-       aid &= ~(BIT(15) | BIT(14));
+       /*
+        * The 5 MSB of the AID field are reserved
+        * (802.11-2016 9.4.1.8 AID field)
+        */
+       aid &= 0x7ff;
 
        ifmgd->broken_ap = false;
 
index faf4f60..f1d40b6 100644 (file)
@@ -801,14 +801,14 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        case NL80211_IFTYPE_ADHOC:
                if (!sdata->vif.bss_conf.ibss_joined)
                        need_offchan = true;
-               /* fall through */
 #ifdef CONFIG_MAC80211_MESH
+               /* fall through */
        case NL80211_IFTYPE_MESH_POINT:
                if (ieee80211_vif_is_mesh(&sdata->vif) &&
                    !sdata->u.mesh.mesh_id_len)
                        need_offchan = true;
-               /* fall through */
 #endif
+               /* fall through */
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_P2P_GO:
index 70e9d2c..b3cff69 100644 (file)
@@ -1607,23 +1607,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 
        /*
         * Change STA power saving mode only at the end of a frame
-        * exchange sequence.
+        * exchange sequence, and only for a data or management
+        * frame as specified in IEEE 802.11-2016 11.2.3.2
         */
        if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
-           !ieee80211_is_back_req(hdr->frame_control) &&
+           (ieee80211_is_mgmt(hdr->frame_control) ||
+            ieee80211_is_data(hdr->frame_control)) &&
            !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
            (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
-            rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
-           /*
-            * PM bit is only checked in frames where it isn't reserved,
-            * in AP mode it's reserved in non-bufferable management frames
-            * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
-            * BAR frames should be ignored as specified in
-            * IEEE 802.11-2012 10.2.1.2.
-            */
-           (!ieee80211_is_mgmt(hdr->frame_control) ||
-            ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
+            rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
                if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
                        if (!ieee80211_has_pm(hdr->frame_control))
                                sta_ps_end(sta);
index 91093d4..5cd5e6e 100644 (file)
@@ -47,6 +47,8 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
                           NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
        bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
                          !ifmgd->tdls_wider_bw_prohibited;
+       bool buffer_sta = ieee80211_hw_check(&local->hw,
+                                            SUPPORTS_TDLS_BUFFER_STA);
        struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata);
        bool vht = sband && sband->vht_cap.vht_supported;
        u8 *pos = skb_put(skb, 10);
@@ -56,7 +58,8 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
        *pos++ = 0x0;
        *pos++ = 0x0;
        *pos++ = 0x0;
-       *pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0;
+       *pos++ = (chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0) |
+                (buffer_sta ? WLAN_EXT_CAPA4_TDLS_BUFFER_STA : 0);
        *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
        *pos++ = 0;
        *pos++ = 0;
@@ -236,6 +239,7 @@ static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac)
        switch (ac) {
        default:
                WARN_ON_ONCE(1);
+               /* fall through */
        case 0:
                return IEEE80211_AC_BE;
        case 1:
index 3160954..25904af 100644 (file)
@@ -2922,7 +2922,9 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
 
                gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
                iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
-               mmic = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC;
+               mmic = build.key->conf.flags &
+                       (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+                        IEEE80211_KEY_FLAG_PUT_MIC_SPACE);
 
                /* don't handle software crypto */
                if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
index d57e5f6..1f82191 100644 (file)
@@ -2110,15 +2110,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy, 0);
 
  wake_up:
-       if (local->in_reconfig) {
-               local->in_reconfig = false;
-               barrier();
-
-               /* Restart deferred ROCs */
-               mutex_lock(&local->mtx);
-               ieee80211_start_next_roc(local);
-               mutex_unlock(&local->mtx);
-       }
 
        if (local->monitors == local->open_count && local->monitors > 0)
                ieee80211_add_virtual_monitor(local);
@@ -2146,6 +2137,16 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                mutex_unlock(&local->sta_mtx);
        }
 
+       if (local->in_reconfig) {
+               local->in_reconfig = false;
+               barrier();
+
+               /* Restart deferred ROCs */
+               mutex_lock(&local->mtx);
+               ieee80211_start_next_roc(local);
+               mutex_unlock(&local->mtx);
+       }
+
        ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_SUSPEND,
                                        false);
index 3e3d301..5f7c963 100644 (file)
@@ -165,6 +165,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
                        qos = sta->sta.wme;
                        break;
                }
+               /* fall through */
        case NL80211_IFTYPE_AP:
                ra = skb->data;
                break;
index b58722d..785056c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Copyright 2002-2004, Instant802 Networks, Inc.
  * Copyright 2008, Jouni Malinen <j@w1.fi>
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -59,8 +59,9 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
        if (info->control.hw_key &&
            (info->flags & IEEE80211_TX_CTL_DONTFRAG ||
             ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) &&
-           !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
-               /* hwaccel - with no need for SW-generated MMIC */
+           !(tx->key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+                                    IEEE80211_KEY_FLAG_PUT_MIC_SPACE))) {
+               /* hwaccel - with no need for SW-generated MMIC or MIC space */
                return TX_CONTINUE;
        }
 
@@ -75,8 +76,15 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
                 skb_tailroom(skb), tail))
                return TX_DROP;
 
-       key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
        mic = skb_put(skb, MICHAEL_MIC_LEN);
+
+       if (tx->key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) {
+               /* Zeroed MIC can help with debug */
+               memset(mic, 0, MICHAEL_MIC_LEN);
+               return TX_CONTINUE;
+       }
+
+       key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
        michael_mic(key, hdr, data, data_len, mic);
        if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE))
                mic[0]++;
index b27c5c6..62f36cc 100644 (file)
@@ -1266,14 +1266,14 @@ static int parse_nat(const struct nlattr *attr,
                /* Do not allow flags if no type is given. */
                if (info->range.flags) {
                        OVS_NLERR(log,
-                                 "NAT flags may be given only when NAT range (SRC or DST) is also specified.\n"
+                                 "NAT flags may be given only when NAT range (SRC or DST) is also specified."
                                  );
                        return -EINVAL;
                }
                info->nat = OVS_CT_NAT;   /* NAT existing connections. */
        } else if (!info->commit) {
                OVS_NLERR(log,
-                         "NAT attributes may be specified only when CT COMMIT flag is also specified.\n"
+                         "NAT attributes may be specified only when CT COMMIT flag is also specified."
                          );
                return -EINVAL;
        }
index 76d050a..56b8e71 100644 (file)
@@ -579,6 +579,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                        return -EINVAL;
 
                skb_reset_network_header(skb);
+               key->eth.type = skb->protocol;
        } else {
                eth = eth_hdr(skb);
                ether_addr_copy(key->eth.src, eth->h_source);
@@ -592,15 +593,23 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
                if (unlikely(parse_vlan(skb, key)))
                        return -ENOMEM;
 
-               skb->protocol = parse_ethertype(skb);
-               if (unlikely(skb->protocol == htons(0)))
+               key->eth.type = parse_ethertype(skb);
+               if (unlikely(key->eth.type == htons(0)))
                        return -ENOMEM;
 
+               /* Multiple tagged packets need to retain TPID to satisfy
+                * skb_vlan_pop(), which will later shift the ethertype into
+                * skb->protocol.
+                */
+               if (key->eth.cvlan.tci & htons(VLAN_TAG_PRESENT))
+                       skb->protocol = key->eth.cvlan.tpid;
+               else
+                       skb->protocol = key->eth.type;
+
                skb_reset_network_header(skb);
                __skb_push(skb, skb->data - skb_mac_header(skb));
        }
        skb_reset_mac_len(skb);
-       key->eth.type = skb->protocol;
 
        /* Network layer. */
        if (key->eth.type == htons(ETH_P_IP)) {
index da215e5..ee7aa0b 100644 (file)
@@ -247,12 +247,13 @@ static int packet_direct_xmit(struct sk_buff *skb)
        struct sk_buff *orig_skb = skb;
        struct netdev_queue *txq;
        int ret = NETDEV_TX_BUSY;
+       bool again = false;
 
        if (unlikely(!netif_running(dev) ||
                     !netif_carrier_ok(dev)))
                goto drop;
 
-       skb = validate_xmit_skb_list(skb, dev);
+       skb = validate_xmit_skb_list(skb, dev, &again);
        if (skb != orig_skb)
                goto drop;
 
index 75d43dc..5aa3a64 100644 (file)
@@ -114,6 +114,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
                          rs, &addr, (int)ntohs(*port));
                        break;
                } else {
+                       rs->rs_bound_addr = 0;
                        rds_sock_put(rs);
                        ret = -ENOMEM;
                        break;
index 8398fee..8d19fd2 100644 (file)
@@ -219,7 +219,11 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
        spin_lock_irqsave(&rds_cong_lock, flags);
 
        list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
-               if (!test_and_set_bit(0, &conn->c_map_queued)) {
+               struct rds_conn_path *cp = &conn->c_path[0];
+
+               rcu_read_lock();
+               if (!test_and_set_bit(0, &conn->c_map_queued) &&
+                   !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
                        rds_stats_inc(s_cong_update_queued);
                        /* We cannot inline the call to rds_send_xmit() here
                         * for two reasons (both pertaining to a TCP transport):
@@ -235,9 +239,9 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
                         *    therefore trigger warnings.
                         * Defer the xmit to rds_send_worker() instead.
                         */
-                       queue_delayed_work(rds_wq,
-                                          &conn->c_path[0].cp_send_w, 0);
+                       queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
                }
+               rcu_read_unlock();
        }
 
        spin_unlock_irqrestore(&rds_cong_lock, flags);
index 6492c0b..b10c0ef 100644 (file)
@@ -366,8 +366,6 @@ void rds_conn_shutdown(struct rds_conn_path *cp)
         * to the conn hash, so we never trigger a reconnect on this
         * conn - the reconnect is always triggered by the active peer. */
        cancel_delayed_work_sync(&cp->cp_conn_w);
-       if (conn->c_destroy_in_prog)
-               return;
        rcu_read_lock();
        if (!hlist_unhashed(&conn->c_hash_node)) {
                rcu_read_unlock();
@@ -384,10 +382,13 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
 {
        struct rds_message *rm, *rtmp;
 
+       set_bit(RDS_DESTROY_PENDING, &cp->cp_flags);
+
        if (!cp->cp_transport_data)
                return;
 
        /* make sure lingering queued work won't try to ref the conn */
+       synchronize_rcu();
        cancel_delayed_work_sync(&cp->cp_send_w);
        cancel_delayed_work_sync(&cp->cp_recv_w);
 
@@ -405,6 +406,11 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
        if (cp->cp_xmit_rm)
                rds_message_put(cp->cp_xmit_rm);
 
+       WARN_ON(delayed_work_pending(&cp->cp_send_w));
+       WARN_ON(delayed_work_pending(&cp->cp_recv_w));
+       WARN_ON(delayed_work_pending(&cp->cp_conn_w));
+       WARN_ON(work_pending(&cp->cp_down_w));
+
        cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
 }
 
@@ -426,7 +432,6 @@ void rds_conn_destroy(struct rds_connection *conn)
                 "%pI4\n", conn, &conn->c_laddr,
                 &conn->c_faddr);
 
-       conn->c_destroy_in_prog = 1;
        /* Ensure conn will not be scheduled for reconnect */
        spin_lock_irq(&rds_conn_lock);
        hlist_del_init_rcu(&conn->c_hash_node);
@@ -685,10 +690,13 @@ void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
 {
        atomic_set(&cp->cp_state, RDS_CONN_ERROR);
 
-       if (!destroy && cp->cp_conn->c_destroy_in_prog)
+       rcu_read_lock();
+       if (!destroy && test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+               rcu_read_unlock();
                return;
-
+       }
        queue_work(rds_wq, &cp->cp_down_w);
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rds_conn_path_drop);
 
@@ -705,9 +713,15 @@ EXPORT_SYMBOL_GPL(rds_conn_drop);
  */
 void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
 {
+       rcu_read_lock();
+       if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+               rcu_read_unlock();
+               return;
+       }
        if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
            !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
                queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
 
index d09f6c1..374ae83 100644 (file)
@@ -88,6 +88,7 @@ enum {
 #define RDS_RECONNECT_PENDING  1
 #define RDS_IN_XMIT            2
 #define RDS_RECV_REFILL                3
+#define        RDS_DESTROY_PENDING     4
 
 /* Max number of multipaths per RDS connection. Must be a power of 2 */
 #define        RDS_MPATH_WORKERS       8
@@ -139,8 +140,7 @@ struct rds_connection {
        __be32                  c_faddr;
        unsigned int            c_loopback:1,
                                c_ping_triggered:1,
-                               c_destroy_in_prog:1,
-                               c_pad_to_32:29;
+                               c_pad_to_32:30;
        int                     c_npaths;
        struct rds_connection   *c_passive;
        struct rds_transport    *c_trans;
index b52cdc8..d3e32d1 100644 (file)
@@ -162,6 +162,12 @@ restart:
                goto out;
        }
 
+       if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+               release_in_xmit(cp);
+               ret = -ENETUNREACH; /* dont requeue send work */
+               goto out;
+       }
+
        /*
         * we record the send generation after doing the xmit acquire.
         * if someone else manages to jump in and do some work, we'll use
@@ -437,7 +443,12 @@ over_batch:
                    !list_empty(&cp->cp_send_queue)) && !raced) {
                        if (batch_count < send_batch_count)
                                goto restart;
-                       queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+                       rcu_read_lock();
+                       if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+                               ret = -ENETUNREACH;
+                       else
+                               queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+                       rcu_read_unlock();
                } else if (raced) {
                        rds_stats_inc(s_send_lock_queue_raced);
                }
@@ -1009,6 +1020,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
                        continue;
 
                if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
+                       if (cmsg->cmsg_len <
+                           CMSG_LEN(sizeof(struct rds_rdma_args)))
+                               return -EINVAL;
                        args = CMSG_DATA(cmsg);
                        *rdma_bytes += args->remote_vec.bytes;
                }
@@ -1148,6 +1162,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        else
                cpath = &conn->c_path[0];
 
+       if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags)) {
+               ret = -EAGAIN;
+               goto out;
+       }
+
        rds_conn_path_connect_if_down(cpath);
 
        ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
@@ -1187,9 +1206,17 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        rds_stats_inc(s_send_queued);
 
        ret = rds_send_xmit(cpath);
-       if (ret == -ENOMEM || ret == -EAGAIN)
-               queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
-
+       if (ret == -ENOMEM || ret == -EAGAIN) {
+               ret = 0;
+               rcu_read_lock();
+               if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags))
+                       ret = -ENETUNREACH;
+               else
+                       queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
+               rcu_read_unlock();
+       }
+       if (ret)
+               goto out;
        rds_message_put(rm);
        return payload_len;
 
@@ -1267,7 +1294,10 @@ rds_send_probe(struct rds_conn_path *cp, __be16 sport,
        rds_stats_inc(s_send_pong);
 
        /* schedule the send work on rds_wq */
-       queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+       rcu_read_lock();
+       if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+               queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+       rcu_read_unlock();
 
        rds_message_put(rm);
        return 0;
index 39f502d..2e554ef 100644 (file)
@@ -270,16 +270,33 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
        return -EADDRNOTAVAIL;
 }
 
+static void rds_tcp_conn_free(void *arg)
+{
+       struct rds_tcp_connection *tc = arg;
+       unsigned long flags;
+
+       rdsdebug("freeing tc %p\n", tc);
+
+       spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+       if (!tc->t_tcp_node_detached)
+               list_del(&tc->t_tcp_node);
+       spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
+       kmem_cache_free(rds_tcp_conn_slab, tc);
+}
+
 static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 {
        struct rds_tcp_connection *tc;
-       int i;
+       int i, j;
+       int ret = 0;
 
        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
-               if (!tc)
-                       return -ENOMEM;
-
+               if (!tc) {
+                       ret = -ENOMEM;
+                       break;
+               }
                mutex_init(&tc->t_conn_path_lock);
                tc->t_sock = NULL;
                tc->t_tinc = NULL;
@@ -290,27 +307,17 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
                tc->t_cpath = &conn->c_path[i];
 
                spin_lock_irq(&rds_tcp_conn_lock);
+               tc->t_tcp_node_detached = false;
                list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
                spin_unlock_irq(&rds_tcp_conn_lock);
                rdsdebug("rds_conn_path [%d] tc %p\n", i,
                         conn->c_path[i].cp_transport_data);
        }
-
-       return 0;
-}
-
-static void rds_tcp_conn_free(void *arg)
-{
-       struct rds_tcp_connection *tc = arg;
-       unsigned long flags;
-       rdsdebug("freeing tc %p\n", tc);
-
-       spin_lock_irqsave(&rds_tcp_conn_lock, flags);
-       if (!tc->t_tcp_node_detached)
-               list_del(&tc->t_tcp_node);
-       spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
-
-       kmem_cache_free(rds_tcp_conn_slab, tc);
+       if (ret) {
+               for (j = 0; j < i; j++)
+                       rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
+       }
+       return ret;
 }
 
 static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
index 46f74da..534c67a 100644 (file)
@@ -170,7 +170,7 @@ void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp)
                 cp->cp_conn, tc, sock);
 
        if (sock) {
-               if (cp->cp_conn->c_destroy_in_prog)
+               if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
                        rds_tcp_set_linger(sock);
                sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN);
                lock_sock(sock->sk);
index e006ef8..dd707b9 100644 (file)
@@ -321,8 +321,12 @@ void rds_tcp_data_ready(struct sock *sk)
        ready = tc->t_orig_data_ready;
        rds_tcp_stats_inc(s_tcp_data_ready_calls);
 
-       if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM)
-               queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+       if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
+               rcu_read_lock();
+               if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+                       queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+               rcu_read_unlock();
+       }
 out:
        read_unlock_bh(&sk->sk_callback_lock);
        ready(sk);
index dc860d1..73c7476 100644 (file)
@@ -202,8 +202,11 @@ void rds_tcp_write_space(struct sock *sk)
        tc->t_last_seen_una = rds_tcp_snd_una(tc);
        rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-       if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+       rcu_read_lock();
+       if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf &&
+           !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
                queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+       rcu_read_unlock();
 
 out:
        read_unlock_bh(&sk->sk_callback_lock);
index f121daa..eb76db1 100644 (file)
@@ -87,8 +87,12 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
 
        cp->cp_reconnect_jiffies = 0;
        set_bit(0, &cp->cp_conn->c_map_queued);
-       queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
-       queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+       rcu_read_lock();
+       if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+               queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+               queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+       }
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rds_connect_path_complete);
 
@@ -133,7 +137,10 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
        set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
        if (cp->cp_reconnect_jiffies == 0) {
                cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
-               queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+               rcu_read_lock();
+               if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+                       queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+               rcu_read_unlock();
                return;
        }
 
@@ -141,8 +148,11 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
        rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
                 rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
                 conn, &conn->c_laddr, &conn->c_faddr);
-       queue_delayed_work(rds_wq, &cp->cp_conn_w,
-                          rand % cp->cp_reconnect_jiffies);
+       rcu_read_lock();
+       if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+               queue_delayed_work(rds_wq, &cp->cp_conn_w,
+                                  rand % cp->cp_reconnect_jiffies);
+       rcu_read_unlock();
 
        cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2,
                                        rds_sysctl_reconnect_max_jiffies);
index bf483db..95d3c90 100644 (file)
@@ -118,13 +118,13 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
        police = to_police(*a);
        if (parm->rate.rate) {
                err = -ENOMEM;
-               R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE]);
+               R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
                if (R_tab == NULL)
                        goto failure;
 
                if (parm->peakrate.rate) {
                        P_tab = qdisc_get_rtab(&parm->peakrate,
-                                              tb[TCA_POLICE_PEAKRATE]);
+                                              tb[TCA_POLICE_PEAKRATE], NULL);
                        if (P_tab == NULL)
                                goto failure;
                }
index 32b1ea7..6708b69 100644 (file)
@@ -281,20 +281,24 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
 }
 
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-                     struct tcf_block_ext_info *ei)
+                     struct tcf_block_ext_info *ei,
+                     struct netlink_ext_ack *extack)
 {
        struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
        struct tcf_chain *chain;
        int err;
 
-       if (!block)
+       if (!block) {
+               NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
                return -ENOMEM;
+       }
        INIT_LIST_HEAD(&block->chain_list);
        INIT_LIST_HEAD(&block->cb_list);
 
        /* Create chain 0 by default, it has to be always present. */
        chain = tcf_chain_create(block, 0);
        if (!chain) {
+               NL_SET_ERR_MSG(extack, "Failed to create new tcf chain");
                err = -ENOMEM;
                goto err_chain_create;
        }
@@ -321,7 +325,8 @@ static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
 }
 
 int tcf_block_get(struct tcf_block **p_block,
-                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+                 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+                 struct netlink_ext_ack *extack)
 {
        struct tcf_block_ext_info ei = {
                .chain_head_change = tcf_chain_head_change_dflt,
@@ -329,7 +334,7 @@ int tcf_block_get(struct tcf_block **p_block,
        };
 
        WARN_ON(!p_filter_chain);
-       return tcf_block_get_ext(p_block, q, &ei);
+       return tcf_block_get_ext(p_block, q, &ei, extack);
 }
 EXPORT_SYMBOL(tcf_block_get);
 
@@ -368,6 +373,8 @@ void tcf_block_put(struct tcf_block *block)
 {
        struct tcf_block_ext_info ei = {0, };
 
+       if (!block)
+               return;
        tcf_block_put_ext(block, block->q, &ei);
 }
 
@@ -793,7 +800,7 @@ replay:
        }
 
        /* And the last stroke */
-       block = cops->tcf_block(q, cl);
+       block = cops->tcf_block(q, cl, extack);
        if (!block) {
                err = -EINVAL;
                goto errout;
@@ -1040,7 +1047,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                if (cl == 0)
                        goto out;
        }
-       block = cops->tcf_block(q, cl);
+       block = cops->tcf_block(q, cl, NULL);
        if (!block)
                goto out;
 
index 6fe798c..8d78e7f 100644 (file)
@@ -42,7 +42,6 @@ struct cls_bpf_prog {
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
-       bool offloaded;
        u32 gen_flags;
        struct tcf_exts exts;
        u32 handle;
@@ -148,33 +147,37 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
 }
 
 static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
-                              enum tc_clsbpf_command cmd)
+                              struct cls_bpf_prog *oldprog)
 {
-       bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE;
        struct tcf_block *block = tp->chain->block;
-       bool skip_sw = tc_skip_sw(prog->gen_flags);
        struct tc_cls_bpf_offload cls_bpf = {};
+       struct cls_bpf_prog *obj;
+       bool skip_sw;
        int err;
 
+       skip_sw = prog && tc_skip_sw(prog->gen_flags);
+       obj = prog ?: oldprog;
+
        tc_cls_common_offload_init(&cls_bpf.common, tp);
-       cls_bpf.command = cmd;
-       cls_bpf.exts = &prog->exts;
-       cls_bpf.prog = prog->filter;
-       cls_bpf.name = prog->bpf_name;
-       cls_bpf.exts_integrated = prog->exts_integrated;
-       cls_bpf.gen_flags = prog->gen_flags;
+       cls_bpf.command = TC_CLSBPF_OFFLOAD;
+       cls_bpf.exts = &obj->exts;
+       cls_bpf.prog = prog ? prog->filter : NULL;
+       cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
+       cls_bpf.name = obj->bpf_name;
+       cls_bpf.exts_integrated = obj->exts_integrated;
+       cls_bpf.gen_flags = obj->gen_flags;
 
        err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
-       if (addorrep) {
+       if (prog) {
                if (err < 0) {
-                       cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
+                       cls_bpf_offload_cmd(tp, oldprog, prog);
                        return err;
                } else if (err > 0) {
                        prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
                }
        }
 
-       if (addorrep && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
+       if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;
 
        return 0;
@@ -183,38 +186,17 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                           struct cls_bpf_prog *oldprog)
 {
-       struct cls_bpf_prog *obj = prog;
-       enum tc_clsbpf_command cmd;
-       bool skip_sw;
-       int ret;
-
-       skip_sw = tc_skip_sw(prog->gen_flags) ||
-               (oldprog && tc_skip_sw(oldprog->gen_flags));
-
-       if (oldprog && oldprog->offloaded) {
-               if (!tc_skip_hw(prog->gen_flags)) {
-                       cmd = TC_CLSBPF_REPLACE;
-               } else if (!tc_skip_sw(prog->gen_flags)) {
-                       obj = oldprog;
-                       cmd = TC_CLSBPF_DESTROY;
-               } else {
-                       return -EINVAL;
-               }
-       } else {
-               if (tc_skip_hw(prog->gen_flags))
-                       return skip_sw ? -EINVAL : 0;
-               cmd = TC_CLSBPF_ADD;
-       }
-
-       ret = cls_bpf_offload_cmd(tp, obj, cmd);
-       if (ret)
-               return ret;
+       if (prog && oldprog && prog->gen_flags != oldprog->gen_flags)
+               return -EINVAL;
 
-       obj->offloaded = true;
-       if (oldprog)
-               oldprog->offloaded = false;
+       if (prog && tc_skip_hw(prog->gen_flags))
+               prog = NULL;
+       if (oldprog && tc_skip_hw(oldprog->gen_flags))
+               oldprog = NULL;
+       if (!prog && !oldprog)
+               return 0;
 
-       return 0;
+       return cls_bpf_offload_cmd(tp, prog, oldprog);
 }
 
 static void cls_bpf_stop_offload(struct tcf_proto *tp,
@@ -222,25 +204,26 @@ static void cls_bpf_stop_offload(struct tcf_proto *tp,
 {
        int err;
 
-       if (!prog->offloaded)
-               return;
-
-       err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
-       if (err) {
+       err = cls_bpf_offload_cmd(tp, NULL, prog);
+       if (err)
                pr_err("Stopping hardware offload failed: %d\n", err);
-               return;
-       }
-
-       prog->offloaded = false;
 }
 
 static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
                                         struct cls_bpf_prog *prog)
 {
-       if (!prog->offloaded)
-               return;
+       struct tcf_block *block = tp->chain->block;
+       struct tc_cls_bpf_offload cls_bpf = {};
+
+       tc_cls_common_offload_init(&cls_bpf.common, tp);
+       cls_bpf.command = TC_CLSBPF_STATS;
+       cls_bpf.exts = &prog->exts;
+       cls_bpf.prog = prog->filter;
+       cls_bpf.name = prog->bpf_name;
+       cls_bpf.exts_integrated = prog->exts_integrated;
+       cls_bpf.gen_flags = prog->gen_flags;
 
-       cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
+       tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
 }
 
 static int cls_bpf_init(struct tcf_proto *tp)
index 74c22b4..8a04c36 100644 (file)
@@ -393,13 +393,16 @@ static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
 static struct qdisc_rate_table *qdisc_rtab_list;
 
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
-                                       struct nlattr *tab)
+                                       struct nlattr *tab,
+                                       struct netlink_ext_ack *extack)
 {
        struct qdisc_rate_table *rtab;
 
        if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
-           nla_len(tab) != TC_RTAB_SIZE)
+           nla_len(tab) != TC_RTAB_SIZE) {
+               NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
                return NULL;
+       }
 
        for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
                if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
@@ -418,6 +421,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
                        r->linklayer = __detect_linklayer(r, rtab->data);
                rtab->next = qdisc_rtab_list;
                qdisc_rtab_list = rtab;
+       } else {
+               NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
        }
        return rtab;
 }
@@ -449,7 +454,8 @@ static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
        [TCA_STAB_DATA] = { .type = NLA_BINARY },
 };
 
-static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
+static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
+                                              struct netlink_ext_ack *extack)
 {
        struct nlattr *tb[TCA_STAB_MAX + 1];
        struct qdisc_size_table *stab;
@@ -458,23 +464,29 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
        u16 *tab = NULL;
        int err;
 
-       err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
+       err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, extack);
        if (err < 0)
                return ERR_PTR(err);
-       if (!tb[TCA_STAB_BASE])
+       if (!tb[TCA_STAB_BASE]) {
+               NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
                return ERR_PTR(-EINVAL);
+       }
 
        s = nla_data(tb[TCA_STAB_BASE]);
 
        if (s->tsize > 0) {
-               if (!tb[TCA_STAB_DATA])
+               if (!tb[TCA_STAB_DATA]) {
+                       NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
                        return ERR_PTR(-EINVAL);
+               }
                tab = nla_data(tb[TCA_STAB_DATA]);
                tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
        }
 
-       if (tsize != s->tsize || (!tab && tsize > 0))
+       if (tsize != s->tsize || (!tab && tsize > 0)) {
+               NL_SET_ERR_MSG(extack, "Invalid size of size table");
                return ERR_PTR(-EINVAL);
+       }
 
        list_for_each_entry(stab, &qdisc_stab_list, list) {
                if (memcmp(&stab->szopts, s, sizeof(*s)))
@@ -669,7 +681,7 @@ int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
        unsigned int size = 4;
 
        clhash->hash = qdisc_class_hash_alloc(size);
-       if (clhash->hash == NULL)
+       if (!clhash->hash)
                return -ENOMEM;
        clhash->hashsize  = size;
        clhash->hashmask  = size - 1;
@@ -795,11 +807,10 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        tcm->tcm_info = refcount_read(&q->refcnt);
        if (nla_put_string(skb, TCA_KIND, q->ops->id))
                goto nla_put_failure;
-       if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
-               goto nla_put_failure;
        if (q->ops->dump && q->ops->dump(q, skb) < 0)
                goto nla_put_failure;
-
+       if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
+               goto nla_put_failure;
        qlen = qdisc_qlen_sum(q);
 
        stab = rtnl_dereference(q->stab);
@@ -899,7 +910,8 @@ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
 
 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
-                      struct Qdisc *new, struct Qdisc *old)
+                      struct Qdisc *new, struct Qdisc *old,
+                      struct netlink_ext_ack *extack)
 {
        struct Qdisc *q = old;
        struct net *net = dev_net(dev);
@@ -914,8 +926,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                    (new && new->flags & TCQ_F_INGRESS)) {
                        num_q = 1;
                        ingress = 1;
-                       if (!dev_ingress_queue(dev))
+                       if (!dev_ingress_queue(dev)) {
+                               NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
                                return -ENOENT;
+                       }
                }
 
                if (dev->flags & IFF_UP)
@@ -966,10 +980,13 @@ skip:
                if (cops && cops->graft) {
                        unsigned long cl = cops->find(parent, classid);
 
-                       if (cl)
-                               err = cops->graft(parent, cl, new, &old);
-                       else
+                       if (cl) {
+                               err = cops->graft(parent, cl, new, &old,
+                                                 extack);
+                       } else {
+                               NL_SET_ERR_MSG(extack, "Specified class not found");
                                err = -ENOENT;
+                       }
                }
                if (!err)
                        notify_and_destroy(net, skb, n, classid, old, new);
@@ -990,7 +1007,8 @@ static struct lock_class_key qdisc_rx_lock;
 static struct Qdisc *qdisc_create(struct net_device *dev,
                                  struct netdev_queue *dev_queue,
                                  struct Qdisc *p, u32 parent, u32 handle,
-                                 struct nlattr **tca, int *errp)
+                                 struct nlattr **tca, int *errp,
+                                 struct netlink_ext_ack *extack)
 {
        int err;
        struct nlattr *kind = tca[TCA_KIND];
@@ -1028,10 +1046,12 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 #endif
 
        err = -ENOENT;
-       if (!ops)
+       if (!ops) {
+               NL_SET_ERR_MSG(extack, "Specified qdisc not found");
                goto err_out;
+       }
 
-       sch = qdisc_alloc(dev_queue, ops);
+       sch = qdisc_alloc(dev_queue, ops, extack);
        if (IS_ERR(sch)) {
                err = PTR_ERR(sch);
                goto err_out2;
@@ -1069,7 +1089,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
        }
 
        if (ops->init) {
-               err = ops->init(sch, tca[TCA_OPTIONS]);
+               err = ops->init(sch, tca[TCA_OPTIONS], extack);
                if (err != 0)
                        goto err_out5;
        }
@@ -1086,7 +1106,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
        }
 
        if (tca[TCA_STAB]) {
-               stab = qdisc_get_stab(tca[TCA_STAB]);
+               stab = qdisc_get_stab(tca[TCA_STAB], extack);
                if (IS_ERR(stab)) {
                        err = PTR_ERR(stab);
                        goto err_out4;
@@ -1097,8 +1117,10 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
                seqcount_t *running;
 
                err = -EOPNOTSUPP;
-               if (sch->flags & TCQ_F_MQROOT)
+               if (sch->flags & TCQ_F_MQROOT) {
+                       NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
                        goto err_out4;
+               }
 
                if (sch->parent != TC_H_ROOT &&
                    !(sch->flags & TCQ_F_INGRESS) &&
@@ -1113,8 +1135,10 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
                                        NULL,
                                        running,
                                        tca[TCA_RATE]);
-               if (err)
+               if (err) {
+                       NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
                        goto err_out4;
+               }
        }
 
        qdisc_hash_add(sch, false);
@@ -1147,21 +1171,24 @@ err_out4:
        goto err_out3;
 }
 
-static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
+static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
+                       struct netlink_ext_ack *extack)
 {
        struct qdisc_size_table *ostab, *stab = NULL;
        int err = 0;
 
        if (tca[TCA_OPTIONS]) {
-               if (!sch->ops->change)
+               if (!sch->ops->change) {
+                       NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
                        return -EINVAL;
-               err = sch->ops->change(sch, tca[TCA_OPTIONS]);
+               }
+               err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
                if (err)
                        return err;
        }
 
        if (tca[TCA_STAB]) {
-               stab = qdisc_get_stab(tca[TCA_STAB]);
+               stab = qdisc_get_stab(tca[TCA_STAB], extack);
                if (IS_ERR(stab))
                        return PTR_ERR(stab);
        }
@@ -1259,8 +1286,10 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                if (clid != TC_H_ROOT) {
                        if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
                                p = qdisc_lookup(dev, TC_H_MAJ(clid));
-                               if (!p)
+                               if (!p) {
+                                       NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
                                        return -ENOENT;
+                               }
                                q = qdisc_leaf(p, clid);
                        } else if (dev_ingress_queue(dev)) {
                                q = dev_ingress_queue(dev)->qdisc_sleeping;
@@ -1268,26 +1297,38 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                } else {
                        q = dev->qdisc;
                }
-               if (!q)
+               if (!q) {
+                       NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
                        return -ENOENT;
+               }
 
-               if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
+               if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
+                       NL_SET_ERR_MSG(extack, "Invalid handle");
                        return -EINVAL;
+               }
        } else {
                q = qdisc_lookup(dev, tcm->tcm_handle);
-               if (!q)
+               if (!q) {
+                       NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
                        return -ENOENT;
+               }
        }
 
-       if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+       if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+               NL_SET_ERR_MSG(extack, "Invalid qdisc name");
                return -EINVAL;
+       }
 
        if (n->nlmsg_type == RTM_DELQDISC) {
-               if (!clid)
+               if (!clid) {
+                       NL_SET_ERR_MSG(extack, "Classid cannot be zero");
                        return -EINVAL;
-               if (q->handle == 0)
+               }
+               if (q->handle == 0) {
+                       NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
                        return -ENOENT;
-               err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
+               }
+               err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
                if (err != 0)
                        return err;
        } else {
@@ -1333,8 +1374,10 @@ replay:
                if (clid != TC_H_ROOT) {
                        if (clid != TC_H_INGRESS) {
                                p = qdisc_lookup(dev, TC_H_MAJ(clid));
-                               if (!p)
+                               if (!p) {
+                                       NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
                                        return -ENOENT;
+                               }
                                q = qdisc_leaf(p, clid);
                        } else if (dev_ingress_queue_create(dev)) {
                                q = dev_ingress_queue(dev)->qdisc_sleeping;
@@ -1349,21 +1392,31 @@ replay:
 
                if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
                        if (tcm->tcm_handle) {
-                               if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
+                               if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
+                                       NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
                                        return -EEXIST;
-                               if (TC_H_MIN(tcm->tcm_handle))
+                               }
+                               if (TC_H_MIN(tcm->tcm_handle)) {
+                                       NL_SET_ERR_MSG(extack, "Invalid minor handle");
                                        return -EINVAL;
+                               }
                                q = qdisc_lookup(dev, tcm->tcm_handle);
                                if (!q)
                                        goto create_n_graft;
-                               if (n->nlmsg_flags & NLM_F_EXCL)
+                               if (n->nlmsg_flags & NLM_F_EXCL) {
+                                       NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
                                        return -EEXIST;
+                               }
                                if (tca[TCA_KIND] &&
-                                   nla_strcmp(tca[TCA_KIND], q->ops->id))
+                                   nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+                                       NL_SET_ERR_MSG(extack, "Invalid qdisc name");
                                        return -EINVAL;
+                               }
                                if (q == p ||
-                                   (p && check_loop(q, p, 0)))
+                                   (p && check_loop(q, p, 0))) {
+                                       NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
                                        return -ELOOP;
+                               }
                                qdisc_refcount_inc(q);
                                goto graft;
                        } else {
@@ -1398,33 +1451,45 @@ replay:
                        }
                }
        } else {
-               if (!tcm->tcm_handle)
+               if (!tcm->tcm_handle) {
+                       NL_SET_ERR_MSG(extack, "Handle cannot be zero");
                        return -EINVAL;
+               }
                q = qdisc_lookup(dev, tcm->tcm_handle);
        }
 
        /* Change qdisc parameters */
-       if (!q)
+       if (!q) {
+               NL_SET_ERR_MSG(extack, "Specified qdisc not found");
                return -ENOENT;
-       if (n->nlmsg_flags & NLM_F_EXCL)
+       }
+       if (n->nlmsg_flags & NLM_F_EXCL) {
+               NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
                return -EEXIST;
-       if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+       }
+       if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+               NL_SET_ERR_MSG(extack, "Invalid qdisc name");
                return -EINVAL;
-       err = qdisc_change(q, tca);
+       }
+       err = qdisc_change(q, tca, extack);
        if (err == 0)
                qdisc_notify(net, skb, n, clid, NULL, q);
        return err;
 
 create_n_graft:
-       if (!(n->nlmsg_flags & NLM_F_CREATE))
+       if (!(n->nlmsg_flags & NLM_F_CREATE)) {
+               NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
                return -ENOENT;
+       }
        if (clid == TC_H_INGRESS) {
-               if (dev_ingress_queue(dev))
+               if (dev_ingress_queue(dev)) {
                        q = qdisc_create(dev, dev_ingress_queue(dev), p,
                                         tcm->tcm_parent, tcm->tcm_parent,
-                                        tca, &err);
-               else
+                                        tca, &err, extack);
+               } else {
+                       NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
                        err = -ENOENT;
+               }
        } else {
                struct netdev_queue *dev_queue;
 
@@ -1437,7 +1502,7 @@ create_n_graft:
 
                q = qdisc_create(dev, dev_queue, p,
                                 tcm->tcm_parent, tcm->tcm_handle,
-                                tca, &err);
+                                tca, &err, extack);
        }
        if (q == NULL) {
                if (err == -EAGAIN)
@@ -1446,7 +1511,7 @@ create_n_graft:
        }
 
 graft:
-       err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
+       err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
        if (err) {
                if (q)
                        qdisc_destroy(q);
@@ -1698,7 +1763,7 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
        cl = cops->find(q, portid);
        if (!cl)
                return;
-       block = cops->tcf_block(q, cl);
+       block = cops->tcf_block(q, cl, NULL);
        if (!block)
                return;
        list_for_each_entry(chain, &block->chain_list, list) {
@@ -1845,7 +1910,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
        new_cl = cl;
        err = -EOPNOTSUPP;
        if (cops->change)
-               err = cops->change(q, clid, portid, tca, &new_cl);
+               err = cops->change(q, clid, portid, tca, &new_cl, extack);
        if (err == 0) {
                tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
                /* We just create a new class, need to do reverse binding. */
index 2dbd249..cd49afc 100644 (file)
@@ -82,7 +82,8 @@ static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
 }
 
 static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
-                       struct Qdisc *new, struct Qdisc **old)
+                       struct Qdisc *new, struct Qdisc **old,
+                       struct netlink_ext_ack *extack)
 {
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)arg;
@@ -191,7 +192,8 @@ static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
 };
 
 static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
-                        struct nlattr **tca, unsigned long *arg)
+                        struct nlattr **tca, unsigned long *arg,
+                        struct netlink_ext_ack *extack)
 {
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
@@ -281,13 +283,15 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
                goto err_out;
        }
 
-       error = tcf_block_get(&flow->block, &flow->filter_list, sch);
+       error = tcf_block_get(&flow->block, &flow->filter_list, sch,
+                             extack);
        if (error) {
                kfree(flow);
                goto err_out;
        }
 
-       flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+       flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+                                   extack);
        if (!flow->q)
                flow->q = &noop_qdisc;
        pr_debug("atm_tc_change: qdisc %p\n", flow->q);
@@ -356,7 +360,8 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
        }
 }
 
-static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                         struct netlink_ext_ack *extack)
 {
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)cl;
@@ -531,7 +536,8 @@ static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
        return p->link.q->ops->peek(p->link.q);
 }
 
-static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
+static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct atm_qdisc_data *p = qdisc_priv(sch);
        int err;
@@ -541,12 +547,13 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
        INIT_LIST_HEAD(&p->link.list);
        list_add(&p->link.list, &p->flows);
        p->link.q = qdisc_create_dflt(sch->dev_queue,
-                                     &pfifo_qdisc_ops, sch->handle);
+                                     &pfifo_qdisc_ops, sch->handle, extack);
        if (!p->link.q)
                p->link.q = &noop_qdisc;
        pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
 
-       err = tcf_block_get(&p->link.block, &p->link.filter_list, sch);
+       err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
+                           extack);
        if (err)
                return err;
 
index 525eb3a..f42025d 100644 (file)
@@ -1132,7 +1132,8 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
        [TCA_CBQ_POLICE]        = { .len = sizeof(struct tc_cbq_police) },
 };
 
-static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
+static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CBQ_MAX + 1];
@@ -1143,22 +1144,27 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        q->delay_timer.function = cbq_undelay;
 
-       if (!opt)
+       if (!opt) {
+               NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
                return -EINVAL;
+       }
 
-       err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
+       err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
        if (err < 0)
                return err;
 
-       if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
+       if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
+               NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
                return -EINVAL;
+       }
 
        r = nla_data(tb[TCA_CBQ_RATE]);
 
-       if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
+       q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
+       if (!q->link.R_tab)
                return -EINVAL;
 
-       err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
+       err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
        if (err)
                goto put_rtab;
 
@@ -1170,7 +1176,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        q->link.common.classid = sch->handle;
        q->link.qdisc = sch;
        q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                     sch->handle);
+                                     sch->handle, NULL);
        if (!q->link.q)
                q->link.q = &noop_qdisc;
        else
@@ -1369,13 +1375,13 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 }
 
 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct cbq_class *cl = (struct cbq_class *)arg;
 
        if (new == NULL) {
-               new = qdisc_create_dflt(sch->dev_queue,
-                                       &pfifo_qdisc_ops, cl->common.classid);
+               new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                       cl->common.classid, extack);
                if (new == NULL)
                        return -ENOBUFS;
        }
@@ -1450,7 +1456,7 @@ static void cbq_destroy(struct Qdisc *sch)
 
 static int
 cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
-                unsigned long *arg)
+                unsigned long *arg, struct netlink_ext_ack *extack)
 {
        int err;
        struct cbq_sched_data *q = qdisc_priv(sch);
@@ -1460,29 +1466,37 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        struct cbq_class *parent;
        struct qdisc_rate_table *rtab = NULL;
 
-       if (opt == NULL)
+       if (!opt) {
+               NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
                return -EINVAL;
+       }
 
-       err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
+       err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
        if (err < 0)
                return err;
 
-       if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
+       if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
+               NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
                return -EOPNOTSUPP;
+       }
 
        if (cl) {
                /* Check parent */
                if (parentid) {
                        if (cl->tparent &&
-                           cl->tparent->common.classid != parentid)
+                           cl->tparent->common.classid != parentid) {
+                               NL_SET_ERR_MSG(extack, "Invalid parent id");
                                return -EINVAL;
-                       if (!cl->tparent && parentid != TC_H_ROOT)
+                       }
+                       if (!cl->tparent && parentid != TC_H_ROOT) {
+                               NL_SET_ERR_MSG(extack, "Parent must be root");
                                return -EINVAL;
+                       }
                }
 
                if (tb[TCA_CBQ_RATE]) {
                        rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
-                                             tb[TCA_CBQ_RTAB]);
+                                             tb[TCA_CBQ_RTAB], extack);
                        if (rtab == NULL)
                                return -EINVAL;
                }
@@ -1494,6 +1508,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
                                                    qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
                        if (err) {
+                               NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
                                qdisc_put_rtab(rtab);
                                return err;
                        }
@@ -1532,19 +1547,23 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        if (parentid == TC_H_ROOT)
                return -EINVAL;
 
-       if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
-           tb[TCA_CBQ_LSSOPT] == NULL)
+       if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
+               NL_SET_ERR_MSG(extack, "One of the following attributes MUST be specified: WRR, rate or link sharing");
                return -EINVAL;
+       }
 
-       rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
+       rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
+                             extack);
        if (rtab == NULL)
                return -EINVAL;
 
        if (classid) {
                err = -EINVAL;
                if (TC_H_MAJ(classid ^ sch->handle) ||
-                   cbq_class_lookup(q, classid))
+                   cbq_class_lookup(q, classid)) {
+                       NL_SET_ERR_MSG(extack, "Specified class not found");
                        goto failure;
+               }
        } else {
                int i;
                classid = TC_H_MAKE(sch->handle, 0x8000);
@@ -1556,8 +1575,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
                                break;
                }
                err = -ENOSR;
-               if (i >= 0x8000)
+               if (i >= 0x8000) {
+                       NL_SET_ERR_MSG(extack, "Unable to generate classid");
                        goto failure;
+               }
                classid = classid|q->hgenerator;
        }
 
@@ -1565,8 +1586,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        if (parentid) {
                parent = cbq_class_lookup(q, parentid);
                err = -EINVAL;
-               if (parent == NULL)
+               if (!parent) {
+                       NL_SET_ERR_MSG(extack, "Failed to find parentid");
                        goto failure;
+               }
        }
 
        err = -ENOBUFS;
@@ -1574,7 +1597,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        if (cl == NULL)
                goto failure;
 
-       err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+       err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
        if (err) {
                kfree(cl);
                return err;
@@ -1586,6 +1609,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
                                        qdisc_root_sleeping_running(sch),
                                        tca[TCA_RATE]);
                if (err) {
+                       NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
                        tcf_block_put(cl->block);
                        kfree(cl);
                        goto failure;
@@ -1594,7 +1618,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
        cl->R_tab = rtab;
        rtab = NULL;
-       cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+       cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+                                 NULL);
        if (!cl->q)
                cl->q = &noop_qdisc;
        else
@@ -1678,7 +1703,8 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
        return 0;
 }
 
-static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
+                                      struct netlink_ext_ack *extack)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
        struct cbq_class *cl = (struct cbq_class *)arg;
index 7a72980..cdd96b9 100644 (file)
@@ -219,14 +219,17 @@ static void cbs_disable_offload(struct net_device *dev,
 }
 
 static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
-                             const struct tc_cbs_qopt *opt)
+                             const struct tc_cbs_qopt *opt,
+                             struct netlink_ext_ack *extack)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
        struct tc_cbs_qopt_offload cbs = { };
        int err;
 
-       if (!ops->ndo_setup_tc)
+       if (!ops->ndo_setup_tc) {
+               NL_SET_ERR_MSG(extack, "Specified device does not support cbs offload");
                return -EOPNOTSUPP;
+       }
 
        cbs.queue = q->queue;
 
@@ -237,8 +240,10 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
        cbs.sendslope = opt->sendslope;
 
        err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
-       if (err < 0)
+       if (err < 0) {
+               NL_SET_ERR_MSG(extack, "Specified device failed to setup cbs hardware offload");
                return err;
+       }
 
        q->enqueue = cbs_enqueue_offload;
        q->dequeue = cbs_dequeue_offload;
@@ -246,7 +251,8 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
        return 0;
 }
 
-static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
+static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
@@ -254,12 +260,14 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
        struct tc_cbs_qopt *qopt;
        int err;
 
-       err = nla_parse_nested(tb, TCA_CBS_MAX, opt, cbs_policy, NULL);
+       err = nla_parse_nested(tb, TCA_CBS_MAX, opt, cbs_policy, extack);
        if (err < 0)
                return err;
 
-       if (!tb[TCA_CBS_PARMS])
+       if (!tb[TCA_CBS_PARMS]) {
+               NL_SET_ERR_MSG(extack, "Missing CBS parameter which are mandatory");
                return -EINVAL;
+       }
 
        qopt = nla_data(tb[TCA_CBS_PARMS]);
 
@@ -276,7 +284,7 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
 
                cbs_disable_offload(dev, q);
        } else {
-               err = cbs_enable_offload(dev, q, qopt);
+               err = cbs_enable_offload(dev, q, qopt, extack);
                if (err < 0)
                        return err;
        }
@@ -291,13 +299,16 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int cbs_init(struct Qdisc *sch, struct nlattr *opt)
+static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
 
-       if (!opt)
+       if (!opt) {
+               NL_SET_ERR_MSG(extack, "Missing CBS qdisc options  which are mandatory");
                return -EINVAL;
+       }
 
        q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
 
@@ -306,7 +317,7 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt)
 
        qdisc_watchdog_init(&q->watchdog, sch);
 
-       return cbs_change(sch, opt);
+       return cbs_change(sch, opt, extack);
 }
 
 static void cbs_destroy(struct Qdisc *sch)
index 531250f..eafc0d1 100644 (file)
@@ -344,7 +344,8 @@ static void choke_free(void *addr)
        kvfree(addr);
 }
 
-static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+static int choke_change(struct Qdisc *sch, struct nlattr *opt,
+                       struct netlink_ext_ack *extack)
 {
        struct choke_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CHOKE_MAX + 1];
@@ -431,9 +432,10 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int choke_init(struct Qdisc *sch, struct nlattr *opt)
+static int choke_init(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
-       return choke_change(sch, opt);
+       return choke_change(sch, opt, extack);
 }
 
 static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
index c518a1e..17cd81f 100644 (file)
@@ -130,7 +130,8 @@ static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
        [TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
 };
 
-static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+static int codel_change(struct Qdisc *sch, struct nlattr *opt,
+                       struct netlink_ext_ack *extack)
 {
        struct codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CODEL_MAX + 1];
@@ -184,7 +185,8 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+static int codel_init(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct codel_sched_data *q = qdisc_priv(sch);
 
@@ -196,7 +198,7 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt)
        q->params.mtu = psched_mtu(qdisc_dev(sch));
 
        if (opt) {
-               int err = codel_change(sch, opt);
+               int err = codel_change(sch, opt, extack);
 
                if (err)
                        return err;
index 5bbcef3..e0b0cf8 100644 (file)
@@ -64,7 +64,8 @@ static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
 };
 
 static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-                           struct nlattr **tca, unsigned long *arg)
+                           struct nlattr **tca, unsigned long *arg,
+                           struct netlink_ext_ack *extack)
 {
        struct drr_sched *q = qdisc_priv(sch);
        struct drr_class *cl = (struct drr_class *)*arg;
@@ -73,17 +74,21 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        u32 quantum;
        int err;
 
-       if (!opt)
+       if (!opt) {
+               NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
                return -EINVAL;
+       }
 
-       err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, NULL);
+       err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, extack);
        if (err < 0)
                return err;
 
        if (tb[TCA_DRR_QUANTUM]) {
                quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
-               if (quantum == 0)
+               if (quantum == 0) {
+                       NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
                        return -EINVAL;
+               }
        } else
                quantum = psched_mtu(qdisc_dev(sch));
 
@@ -94,8 +99,10 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                                                    NULL,
                                                    qdisc_root_sleeping_running(sch),
                                                    tca[TCA_RATE]);
-                       if (err)
+                       if (err) {
+                               NL_SET_ERR_MSG(extack, "Failed to replace estimator");
                                return err;
+                       }
                }
 
                sch_tree_lock(sch);
@@ -113,7 +120,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        cl->common.classid = classid;
        cl->quantum        = quantum;
        cl->qdisc          = qdisc_create_dflt(sch->dev_queue,
-                                              &pfifo_qdisc_ops, classid);
+                                              &pfifo_qdisc_ops, classid,
+                                              NULL);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
        else
@@ -125,6 +133,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                                            qdisc_root_sleeping_running(sch),
                                            tca[TCA_RATE]);
                if (err) {
+                       NL_SET_ERR_MSG(extack, "Failed to replace estimator");
                        qdisc_destroy(cl->qdisc);
                        kfree(cl);
                        return err;
@@ -172,12 +181,15 @@ static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
        return (unsigned long)drr_find_class(sch, classid);
 }
 
-static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                      struct netlink_ext_ack *extack)
 {
        struct drr_sched *q = qdisc_priv(sch);
 
-       if (cl)
+       if (cl) {
+               NL_SET_ERR_MSG(extack, "DRR classid must be zero");
                return NULL;
+       }
 
        return q->block;
 }
@@ -201,13 +213,14 @@ static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 }
 
 static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
-                          struct Qdisc *new, struct Qdisc **old)
+                          struct Qdisc *new, struct Qdisc **old,
+                          struct netlink_ext_ack *extack)
 {
        struct drr_class *cl = (struct drr_class *)arg;
 
        if (new == NULL) {
-               new = qdisc_create_dflt(sch->dev_queue,
-                                       &pfifo_qdisc_ops, cl->common.classid);
+               new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                       cl->common.classid, NULL);
                if (new == NULL)
                        new = &noop_qdisc;
        }
@@ -408,12 +421,13 @@ out:
        return NULL;
 }
 
-static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+                         struct netlink_ext_ack *extack)
 {
        struct drr_sched *q = qdisc_priv(sch);
        int err;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
        err = qdisc_class_hash_init(&q->clhash);
index fb4fb71..049714c 100644 (file)
@@ -61,7 +61,8 @@ static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
 /* ------------------------- Class/flow operations ------------------------- */
 
 static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
-                       struct Qdisc *new, struct Qdisc **old)
+                       struct Qdisc *new, struct Qdisc **old,
+                       struct netlink_ext_ack *extack)
 {
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 
@@ -70,7 +71,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
 
        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                       sch->handle);
+                                       sch->handle, NULL);
                if (new == NULL)
                        new = &noop_qdisc;
        }
@@ -112,7 +113,8 @@ static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
 };
 
 static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
-                        struct nlattr **tca, unsigned long *arg)
+                        struct nlattr **tca, unsigned long *arg,
+                        struct netlink_ext_ack *extack)
 {
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *opt = tca[TCA_OPTIONS];
@@ -184,7 +186,8 @@ ignore:
        }
 }
 
-static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                         struct netlink_ext_ack *extack)
 {
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 
@@ -330,7 +333,8 @@ static struct sk_buff *dsmark_peek(struct Qdisc *sch)
        return p->q->ops->peek(p->q);
 }
 
-static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
+static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct dsmark_qdisc_data *p = qdisc_priv(sch);
        struct nlattr *tb[TCA_DSMARK_MAX + 1];
@@ -344,7 +348,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
        if (!opt)
                goto errout;
 
-       err = tcf_block_get(&p->block, &p->filter_list, sch);
+       err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
        if (err)
                return err;
 
@@ -377,7 +381,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
        p->default_index = default_index;
        p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
 
-       p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
+       p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
+                                NULL);
        if (p->q == NULL)
                p->q = &noop_qdisc;
        else
index 1e37247..24893d3 100644 (file)
@@ -55,7 +55,8 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        return NET_XMIT_CN;
 }
 
-static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
+static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        bool bypass;
        bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
@@ -157,7 +158,7 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
                nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
                ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
 
-               ret = q->ops->change(q, nla);
+               ret = q->ops->change(q, nla, NULL);
                kfree(nla);
        }
        return ret;
@@ -165,12 +166,14 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
 EXPORT_SYMBOL(fifo_set_limit);
 
 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
-                              unsigned int limit)
+                              unsigned int limit,
+                              struct netlink_ext_ack *extack)
 {
        struct Qdisc *q;
        int err = -ENOMEM;
 
-       q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1));
+       q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
+                             extack);
        if (q) {
                err = fifo_set_limit(q, limit);
                if (err < 0) {
index 263d16e..a366e4c 100644 (file)
@@ -685,7 +685,8 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
 };
 
-static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+static int fq_change(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
@@ -788,7 +789,8 @@ static void fq_destroy(struct Qdisc *sch)
        qdisc_watchdog_cancel(&q->watchdog);
 }
 
-static int fq_init(struct Qdisc *sch, struct nlattr *opt)
+static int fq_init(struct Qdisc *sch, struct nlattr *opt,
+                  struct netlink_ext_ack *extack)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;
@@ -811,7 +813,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
        qdisc_watchdog_init(&q->watchdog, sch);
 
        if (opt)
-               err = fq_change(sch, opt);
+               err = fq_change(sch, opt, extack);
        else
                err = fq_resize(sch, q->fq_trees_log);
 
index 0305d79..22fa13c 100644 (file)
@@ -377,7 +377,8 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
 };
 
-static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+                          struct netlink_ext_ack *extack)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
@@ -458,7 +459,8 @@ static void fq_codel_destroy(struct Qdisc *sch)
        kvfree(q->flows);
 }
 
-static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
+static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
+                        struct netlink_ext_ack *extack)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;
@@ -477,12 +479,12 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));
 
        if (opt) {
-               int err = fq_codel_change(sch, opt);
+               int err = fq_codel_change(sch, opt, extack);
                if (err)
                        return err;
        }
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
@@ -595,7 +597,8 @@ static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
 {
 }
 
-static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                           struct netlink_ext_ack *extack)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
 
index 876fab2..a883c50 100644 (file)
@@ -32,6 +32,7 @@
 #include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <trace/events/qdisc.h>
+#include <net/xfrm.h>
 
 /* Qdisc to use by default */
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
@@ -111,10 +112,16 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 
 static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
-       __skb_queue_head(&q->gso_skb, skb);
-       q->qstats.requeues++;
-       qdisc_qstats_backlog_inc(q, skb);
-       q->q.qlen++;    /* it's still part of the queue */
+       while (skb) {
+               struct sk_buff *next = skb->next;
+
+               __skb_queue_tail(&q->gso_skb, skb);
+               q->qstats.requeues++;
+               qdisc_qstats_backlog_inc(q, skb);
+               q->q.qlen++;    /* it's still part of the queue */
+
+               skb = next;
+       }
        __netif_schedule(q);
 
        return 0;
@@ -125,12 +132,19 @@ static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
        spinlock_t *lock = qdisc_lock(q);
 
        spin_lock(lock);
-       __skb_queue_tail(&q->gso_skb, skb);
+       while (skb) {
+               struct sk_buff *next = skb->next;
+
+               __skb_queue_tail(&q->gso_skb, skb);
+
+               qdisc_qstats_cpu_requeues_inc(q);
+               qdisc_qstats_cpu_backlog_inc(q, skb);
+               qdisc_qstats_cpu_qlen_inc(q);
+
+               skb = next;
+       }
        spin_unlock(lock);
 
-       qdisc_qstats_cpu_requeues_inc(q);
-       qdisc_qstats_cpu_backlog_inc(q, skb);
-       qdisc_qstats_cpu_qlen_inc(q);
        __netif_schedule(q);
 
        return 0;
@@ -230,6 +244,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 
                /* skb in gso_skb were already validated */
                *validate = false;
+               if (xfrm_offload(skb))
+                       *validate = true;
                /* check the reason of requeuing without tx lock first */
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
@@ -285,6 +301,7 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                     spinlock_t *root_lock, bool validate)
 {
        int ret = NETDEV_TX_BUSY;
+       bool again = false;
 
        /* And release qdisc */
        if (root_lock)
@@ -292,7 +309,17 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
        /* Note that we validate skb (GSO, checksum, ...) outside of locks */
        if (validate)
-               skb = validate_xmit_skb_list(skb, dev);
+               skb = validate_xmit_skb_list(skb, dev, &again);
+
+#ifdef CONFIG_XFRM_OFFLOAD
+       if (unlikely(again)) {
+               if (root_lock)
+                       spin_lock(root_lock);
+
+               dev_requeue_skb(skb, q);
+               return false;
+       }
+#endif
 
        if (likely(skb)) {
                HARD_TX_LOCK(dev, txq, smp_processor_id());
@@ -551,7 +578,8 @@ struct Qdisc noop_qdisc = {
 };
 EXPORT_SYMBOL(noop_qdisc);
 
-static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
+static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
+                       struct netlink_ext_ack *extack)
 {
        /* register_qdisc() assigns a default of noop_enqueue if unset,
         * but __dev_queue_xmit() treats noqueue only as such
@@ -690,7 +718,8 @@ nla_put_failure:
        return -1;
 }
 
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
+                          struct netlink_ext_ack *extack)
 {
        unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
@@ -753,7 +782,8 @@ static struct lock_class_key qdisc_tx_busylock;
 static struct lock_class_key qdisc_running_key;
 
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
-                         const struct Qdisc_ops *ops)
+                         const struct Qdisc_ops *ops,
+                         struct netlink_ext_ack *extack)
 {
        void *p;
        struct Qdisc *sch;
@@ -762,6 +792,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        struct net_device *dev;
 
        if (!dev_queue) {
+               NL_SET_ERR_MSG(extack, "No device queue given");
                err = -EINVAL;
                goto errout;
        }
@@ -826,21 +857,24 @@ errout:
 
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops,
-                               unsigned int parentid)
+                               unsigned int parentid,
+                               struct netlink_ext_ack *extack)
 {
        struct Qdisc *sch;
 
-       if (!try_module_get(ops->owner))
+       if (!try_module_get(ops->owner)) {
+               NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
                return NULL;
+       }
 
-       sch = qdisc_alloc(dev_queue, ops);
+       sch = qdisc_alloc(dev_queue, ops, extack);
        if (IS_ERR(sch)) {
                module_put(ops->owner);
                return NULL;
        }
        sch->parent = parentid;
 
-       if (!ops->init || ops->init(sch, NULL) == 0)
+       if (!ops->init || ops->init(sch, NULL, extack) == 0)
                return sch;
 
        qdisc_destroy(sch);
@@ -952,7 +986,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
        if (dev->priv_flags & IFF_NO_QUEUE)
                ops = &noqueue_qdisc_ops;
 
-       qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
+       qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
        if (!qdisc) {
                netdev_info(dev, "activation failed\n");
                return;
@@ -975,7 +1009,7 @@ static void attach_default_qdiscs(struct net_device *dev)
                dev->qdisc = txq->qdisc_sleeping;
                qdisc_refcount_inc(dev->qdisc);
        } else {
-               qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
+               qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
                if (qdisc) {
                        dev->qdisc = qdisc;
                        qdisc->ops->attach(qdisc);
@@ -1240,6 +1274,8 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
 
        if (!tp_head) {
                RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
+               /* Wait for flying RCU callback before it is freed. */
+               rcu_barrier_bh();
                return;
        }
 
@@ -1255,7 +1291,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
        rcu_assign_pointer(*miniqp->p_miniq, miniq);
 
        if (miniq_old)
-               /* This is counterpart of the rcu barrier above. We need to
+               /* This is counterpart of the rcu barriers above. We need to
                 * block potential new user of miniq_old until all readers
                 * are not seeing it.
                 */
index bc30f91..cbe4831 100644 (file)
@@ -306,12 +306,13 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
        struct tc_gred_sopt *sopt;
        int i;
 
-       if (dps == NULL)
+       if (!dps)
                return -EINVAL;
 
        sopt = nla_data(dps);
 
-       if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
+       if (sopt->DPs > MAX_DPs || sopt->DPs == 0 ||
+           sopt->def_DP >= sopt->DPs)
                return -EINVAL;
 
        sch_tree_lock(sch);
@@ -391,7 +392,8 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
        [TCA_GRED_LIMIT]        = { .type = NLA_U32 },
 };
 
-static int gred_change(struct Qdisc *sch, struct nlattr *opt)
+static int gred_change(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct gred_sched *table = qdisc_priv(sch);
        struct tc_gred_qopt *ctl;
@@ -465,12 +467,13 @@ errout:
        return err;
 }
 
-static int gred_init(struct Qdisc *sch, struct nlattr *opt)
+static int gred_init(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        struct nlattr *tb[TCA_GRED_MAX + 1];
        int err;
 
-       if (opt == NULL)
+       if (!opt)
                return -EINVAL;
 
        err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
index d04068a..3ae9877 100644 (file)
@@ -921,7 +921,8 @@ static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
 
 static int
 hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-                 struct nlattr **tca, unsigned long *arg)
+                 struct nlattr **tca, unsigned long *arg,
+                 struct netlink_ext_ack *extack)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)*arg;
@@ -1033,7 +1034,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        if (cl == NULL)
                return -ENOBUFS;
 
-       err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+       err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
        if (err) {
                kfree(cl);
                return err;
@@ -1061,8 +1062,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        cl->cl_common.classid = classid;
        cl->sched     = q;
        cl->cl_parent = parent;
-       cl->qdisc = qdisc_create_dflt(sch->dev_queue,
-                                     &pfifo_qdisc_ops, classid);
+       cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                     classid, NULL);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
        else
@@ -1176,7 +1177,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 
 static int
 hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                struct Qdisc **old)
+                struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct hfsc_class *cl = (struct hfsc_class *)arg;
 
@@ -1184,7 +1185,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                return -EINVAL;
        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                       cl->cl_common.classid);
+                                       cl->cl_common.classid, NULL);
                if (new == NULL)
                        new = &noop_qdisc;
        }
@@ -1246,7 +1247,8 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
        cl->filter_cnt--;
 }
 
-static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg,
+                                       struct netlink_ext_ack *extack)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;
@@ -1388,7 +1390,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
 }
 
 static int
-hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+               struct netlink_ext_ack *extack)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;
@@ -1396,7 +1399,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 
        qdisc_watchdog_init(&q->watchdog, sch);
 
-       if (opt == NULL || nla_len(opt) < sizeof(*qopt))
+       if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = nla_data(opt);
 
@@ -1406,14 +1409,14 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
                return err;
        q->eligible = RB_ROOT;
 
-       err = tcf_block_get(&q->root.block, &q->root.filter_list, sch);
+       err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
        if (err)
                return err;
 
        q->root.cl_common.classid = sch->handle;
        q->root.sched   = q;
        q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                         sch->handle);
+                                         sch->handle, NULL);
        if (q->root.qdisc == NULL)
                q->root.qdisc = &noop_qdisc;
        else
@@ -1429,7 +1432,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 }
 
 static int
-hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
+                 struct netlink_ext_ack *extack)
 {
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;
index 73a53c0..bce2632 100644 (file)
@@ -504,7 +504,8 @@ static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
        [TCA_HHF_NON_HH_WEIGHT]  = { .type = NLA_U32 },
 };
 
-static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct hhf_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_HHF_MAX + 1];
@@ -571,7 +572,8 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
+static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct hhf_sched_data *q = qdisc_priv(sch);
        int i;
@@ -589,7 +591,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
        q->hhf_non_hh_weight = 2;
 
        if (opt) {
-               int err = hhf_change(sch, opt);
+               int err = hhf_change(sch, opt, extack);
 
                if (err)
                        return err;
index fa03807..1ea9846 100644 (file)
@@ -1017,7 +1017,8 @@ static void htb_work_func(struct work_struct *work)
        rcu_read_unlock();
 }
 
-static int htb_init(struct Qdisc *sch, struct nlattr *opt)
+static int htb_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct htb_sched *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_HTB_MAX + 1];
@@ -1031,7 +1032,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
        if (!opt)
                return -EINVAL;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
@@ -1171,7 +1172,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 }
 
 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct htb_class *cl = (struct htb_class *)arg;
 
@@ -1179,7 +1180,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                return -EINVAL;
        if (new == NULL &&
            (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                    cl->common.classid)) == NULL)
+                                    cl->common.classid, extack)) == NULL)
                return -ENOBUFS;
 
        *old = qdisc_replace(sch, new, &cl->un.leaf.q);
@@ -1289,7 +1290,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
        if (!cl->level && htb_parent_last_child(cl)) {
                new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                         cl->parent->common.classid);
+                                         cl->parent->common.classid,
+                                         NULL);
                last_child = 1;
        }
 
@@ -1326,7 +1328,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 static int htb_change_class(struct Qdisc *sch, u32 classid,
                            u32 parentid, struct nlattr **tca,
-                           unsigned long *arg)
+                           unsigned long *arg, struct netlink_ext_ack *extack)
 {
        int err = -EINVAL;
        struct htb_sched *q = qdisc_priv(sch);
@@ -1356,10 +1358,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
        /* Keeping backward compatible with rate_table based iproute2 tc */
        if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
-               qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));
+               qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
+                                             NULL));
 
        if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
-               qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));
+               qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
+                                             NULL));
 
        if (!cl) {              /* new class */
                struct Qdisc *new_q;
@@ -1394,7 +1398,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                if (!cl)
                        goto failure;
 
-               err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+               err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
                if (err) {
                        kfree(cl);
                        goto failure;
@@ -1423,8 +1427,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                 * so that can't be used inside of sch_tree_lock
                 * -- thanks to Karlis Peisenieks
                 */
-               new_q = qdisc_create_dflt(sch->dev_queue,
-                                         &pfifo_qdisc_ops, classid);
+               new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                         classid, NULL);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
                        unsigned int qlen = parent->un.leaf.q->q.qlen;
@@ -1524,7 +1528,8 @@ failure:
        return err;
 }
 
-static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
+                                      struct netlink_ext_ack *extack)
 {
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)arg;
index fc1286f..7ca2be2 100644 (file)
@@ -48,7 +48,8 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 {
 }
 
-static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                          struct netlink_ext_ack *extack)
 {
        struct ingress_sched_data *q = qdisc_priv(sch);
 
@@ -62,7 +63,8 @@ static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
        mini_qdisc_pair_swap(miniqp, tp_head);
 }
 
-static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
+static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
+                       struct netlink_ext_ack *extack)
 {
        struct ingress_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
@@ -76,7 +78,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
        q->block_info.chain_head_change = clsact_chain_head_change;
        q->block_info.chain_head_change_priv = &q->miniqp;
 
-       err = tcf_block_get_ext(&q->block, sch, &q->block_info);
+       err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
        if (err)
                return err;
 
@@ -153,7 +155,8 @@ static unsigned long clsact_bind_filter(struct Qdisc *sch,
        return clsact_find(sch, classid);
 }
 
-static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                         struct netlink_ext_ack *extack)
 {
        struct clsact_sched_data *q = qdisc_priv(sch);
 
@@ -167,7 +170,8 @@ static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
        }
 }
 
-static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
+static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct clsact_sched_data *q = qdisc_priv(sch);
        struct net_device *dev = qdisc_dev(sch);
@@ -182,7 +186,8 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
        q->ingress_block_info.chain_head_change = clsact_chain_head_change;
        q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;
 
-       err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info);
+       err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
+                               extack);
        if (err)
                return err;
 
@@ -192,7 +197,8 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
        q->egress_block_info.chain_head_change = clsact_chain_head_change;
        q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
 
-       err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
+       err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info,
+                               extack);
        if (err)
                return err;
 
index 8cbb5c8..f062a18 100644 (file)
@@ -36,7 +36,8 @@ static void mq_destroy(struct Qdisc *sch)
        kfree(priv->qdiscs);
 }
 
-static int mq_init(struct Qdisc *sch, struct nlattr *opt)
+static int mq_init(struct Qdisc *sch, struct nlattr *opt,
+                  struct netlink_ext_ack *extack)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct mq_sched *priv = qdisc_priv(sch);
@@ -60,7 +61,8 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
                dev_queue = netdev_get_tx_queue(dev, ntx);
                qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
-                                                   TC_H_MIN(ntx + 1)));
+                                                   TC_H_MIN(ntx + 1)),
+                                         extack);
                if (!qdisc)
                        return -ENOMEM;
                priv->qdiscs[ntx] = qdisc;
@@ -154,7 +156,7 @@ static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
 }
 
 static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
-                   struct Qdisc **old)
+                   struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
        struct net_device *dev = qdisc_dev(sch);
index 8622745..0e9d761 100644 (file)
@@ -132,7 +132,8 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
        return 0;
 }
 
-static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct mqprio_sched *priv = qdisc_priv(sch);
@@ -229,7 +230,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
                qdisc = qdisc_create_dflt(dev_queue,
                                          get_default_qdisc_ops(dev, i),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
-                                                   TC_H_MIN(i + 1)));
+                                                   TC_H_MIN(i + 1)), extack);
                if (!qdisc)
                        return -ENOMEM;
 
@@ -319,7 +320,7 @@ static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
 }
 
 static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
-                   struct Qdisc **old)
+                       struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
index 0122163..1da7ea8 100644 (file)
@@ -180,7 +180,8 @@ multiq_destroy(struct Qdisc *sch)
        kfree(q->queues);
 }
 
-static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct multiq_sched_data *q = qdisc_priv(sch);
        struct tc_multiq_qopt *qopt;
@@ -215,7 +216,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
                        child = qdisc_create_dflt(sch->dev_queue,
                                                  &pfifo_qdisc_ops,
                                                  TC_H_MAKE(sch->handle,
-                                                           i + 1));
+                                                           i + 1), extack);
                        if (child) {
                                sch_tree_lock(sch);
                                old = q->queues[i];
@@ -236,17 +237,18 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
+static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct multiq_sched_data *q = qdisc_priv(sch);
        int i, err;
 
        q->queues = NULL;
 
-       if (opt == NULL)
+       if (!opt)
                return -EINVAL;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
@@ -258,7 +260,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
        for (i = 0; i < q->max_bands; i++)
                q->queues[i] = &noop_qdisc;
 
-       return multiq_tune(sch, opt);
+       return multiq_tune(sch, opt, extack);
 }
 
 static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -281,7 +283,7 @@ nla_put_failure:
 }
 
 static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                     struct Qdisc **old)
+                       struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct multiq_sched_data *q = qdisc_priv(sch);
        unsigned long band = arg - 1;
@@ -369,7 +371,8 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
        }
 }
 
-static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                         struct netlink_ext_ack *extack)
 {
        struct multiq_sched_data *q = qdisc_priv(sch);
 
index dd70924..7bbc13b 100644 (file)
@@ -893,7 +893,8 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 }
 
 /* Parse netlink message to set options */
-static int netem_change(struct Qdisc *sch, struct nlattr *opt)
+static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+                       struct netlink_ext_ack *extack)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_NETEM_MAX + 1];
@@ -984,7 +985,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
        return ret;
 }
 
-static int netem_init(struct Qdisc *sch, struct nlattr *opt)
+static int netem_init(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
        int ret;
@@ -995,7 +997,7 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
                return -EINVAL;
 
        q->loss_model = CLG_RANDOM;
-       ret = netem_change(sch, opt);
+       ret = netem_change(sch, opt, extack);
        if (ret)
                pr_info("netem: change failed\n");
        return ret;
@@ -1157,7 +1159,7 @@ static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
 
index 776c694..18d30bb 100644 (file)
@@ -181,7 +181,8 @@ static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
        [TCA_PIE_BYTEMODE] = {.type = NLA_U32},
 };
 
-static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+static int pie_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct pie_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_PIE_MAX + 1];
@@ -439,7 +440,8 @@ static void pie_timer(struct timer_list *t)
 
 }
 
-static int pie_init(struct Qdisc *sch, struct nlattr *opt)
+static int pie_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct pie_sched_data *q = qdisc_priv(sch);
 
@@ -451,7 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt)
        timer_setup(&q->adapt_timer, pie_timer, 0);
 
        if (opt) {
-               int err = pie_change(sch, opt);
+               int err = pie_change(sch, opt, extack);
 
                if (err)
                        return err;
index 1c6cbab..5619d2e 100644 (file)
@@ -123,7 +123,8 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch)
        return qdisc_dequeue_head(sch);
 }
 
-static int plug_init(struct Qdisc *sch, struct nlattr *opt)
+static int plug_init(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        struct plug_sched_data *q = qdisc_priv(sch);
 
@@ -158,7 +159,8 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt)
  *   command is received (just act as a pass-thru queue).
  * TCQ_PLUG_LIMIT: Increase/decrease queue size
  */
-static int plug_change(struct Qdisc *sch, struct nlattr *opt)
+static int plug_change(struct Qdisc *sch, struct nlattr *opt,
+                      struct netlink_ext_ack *extack)
 {
        struct plug_sched_data *q = qdisc_priv(sch);
        struct tc_plug_qopt *msg;
index 2c79559..fe1510e 100644 (file)
@@ -153,7 +153,8 @@ prio_destroy(struct Qdisc *sch)
                qdisc_destroy(q->queues[prio]);
 }
 
-static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
        struct Qdisc *queues[TCQ_PRIO_BANDS];
@@ -175,7 +176,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
        /* Before commit, make sure we can allocate all new qdiscs */
        for (i = oldbands; i < qopt->bands; i++) {
                queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-                                             TC_H_MAKE(sch->handle, i + 1));
+                                             TC_H_MAKE(sch->handle, i + 1),
+                                             extack);
                if (!queues[i]) {
                        while (i > oldbands)
                                qdisc_destroy(queues[--i]);
@@ -205,7 +207,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int prio_init(struct Qdisc *sch, struct nlattr *opt)
+static int prio_init(struct Qdisc *sch, struct nlattr *opt,
+                    struct netlink_ext_ack *extack)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
        int err;
@@ -213,11 +216,11 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
        if (!opt)
                return -EINVAL;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
-       return prio_tune(sch, opt);
+       return prio_tune(sch, opt, extack);
 }
 
 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -240,7 +243,7 @@ nla_put_failure:
 }
 
 static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                     struct Qdisc **old)
+                     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
        unsigned long band = arg - 1;
@@ -327,7 +330,8 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
        }
 }
 
-static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                       struct netlink_ext_ack *extack)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
 
index 6962b37..bb1a9c1 100644 (file)
@@ -402,7 +402,8 @@ static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
 }
 
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-                           struct nlattr **tca, unsigned long *arg)
+                           struct nlattr **tca, unsigned long *arg,
+                           struct netlink_ext_ack *extack)
 {
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_class *cl = (struct qfq_class *)*arg;
@@ -479,8 +480,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        cl->common.classid = classid;
        cl->deficit = lmax;
 
-       cl->qdisc = qdisc_create_dflt(sch->dev_queue,
-                                     &pfifo_qdisc_ops, classid);
+       cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                     classid, NULL);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
 
@@ -564,7 +565,8 @@ static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
        return (unsigned long)qfq_find_class(sch, classid);
 }
 
-static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                      struct netlink_ext_ack *extack)
 {
        struct qfq_sched *q = qdisc_priv(sch);
 
@@ -593,13 +595,14 @@ static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 }
 
 static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
-                          struct Qdisc *new, struct Qdisc **old)
+                          struct Qdisc *new, struct Qdisc **old,
+                          struct netlink_ext_ack *extack)
 {
        struct qfq_class *cl = (struct qfq_class *)arg;
 
        if (new == NULL) {
-               new = qdisc_create_dflt(sch->dev_queue,
-                                       &pfifo_qdisc_ops, cl->common.classid);
+               new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                       cl->common.classid, NULL);
                if (new == NULL)
                        new = &noop_qdisc;
        }
@@ -1413,14 +1416,15 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
        qfq_deactivate_class(q, cl);
 }
 
-static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+                         struct netlink_ext_ack *extack)
 {
        struct qfq_sched *q = qdisc_priv(sch);
        struct qfq_group *grp;
        int i, j, err;
        u32 max_cl_shift, maxbudg_shift, max_classes;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
index f0747eb..a392eaa 100644 (file)
@@ -157,7 +157,6 @@ static int red_offload(struct Qdisc *sch, bool enable)
                .handle = sch->handle,
                .parent = sch->parent,
        };
-       int err;
 
        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return -EOPNOTSUPP;
@@ -172,14 +171,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
                opt.command = TC_RED_DESTROY;
        }
 
-       err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
-
-       if (!err && enable)
-               sch->flags |= TCQ_F_OFFLOADED;
-       else
-               sch->flags &= ~TCQ_F_OFFLOADED;
-
-       return err;
+       return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
 }
 
 static void red_destroy(struct Qdisc *sch)
@@ -197,7 +189,8 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
        [TCA_RED_MAX_P] = { .type = NLA_U32 },
 };
 
-static int red_change(struct Qdisc *sch, struct nlattr *opt)
+static int red_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct red_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_RED_MAX + 1];
@@ -224,7 +217,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
                return -EINVAL;
 
        if (ctl->limit > 0) {
-               child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
+               child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
+                                        extack);
                if (IS_ERR(child))
                        return PTR_ERR(child);
        }
@@ -272,14 +266,15 @@ static inline void red_adaptative_timer(struct timer_list *t)
        spin_unlock(root_lock);
 }
 
-static int red_init(struct Qdisc *sch, struct nlattr *opt)
+static int red_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
        q->qdisc = &noop_qdisc;
        q->sch = sch;
        timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
-       return red_change(sch, opt);
+       return red_change(sch, opt, extack);
 }
 
 static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
@@ -294,12 +289,22 @@ static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
                        .stats.qstats = &sch->qstats,
                },
        };
+       int err;
+
+       sch->flags &= ~TCQ_F_OFFLOADED;
 
-       if (!(sch->flags & TCQ_F_OFFLOADED))
+       if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+               return 0;
+
+       err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
+                                           &hw_stats);
+       if (err == -EOPNOTSUPP)
                return 0;
 
-       return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
-                                            &hw_stats);
+       if (!err)
+               sch->flags |= TCQ_F_OFFLOADED;
+
+       return err;
 }
 
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -380,7 +385,7 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
index 0678deb..7cbdad8 100644 (file)
@@ -488,7 +488,8 @@ static const struct tc_sfb_qopt sfb_default_ops = {
        .penalty_burst = 20,
 };
 
-static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
+static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        struct sfb_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child;
@@ -512,7 +513,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
        if (limit == 0)
                limit = qdisc_dev(sch)->tx_queue_len;
 
-       child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
+       child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
        if (IS_ERR(child))
                return PTR_ERR(child);
 
@@ -549,17 +550,18 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
-static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
+static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct sfb_sched_data *q = qdisc_priv(sch);
        int err;
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
        q->qdisc = &noop_qdisc;
-       return sfb_change(sch, opt);
+       return sfb_change(sch, opt, extack);
 }
 
 static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -615,7 +617,7 @@ static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct sfb_sched_data *q = qdisc_priv(sch);
 
@@ -643,7 +645,8 @@ static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
 }
 
 static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-                           struct nlattr **tca, unsigned long *arg)
+                           struct nlattr **tca, unsigned long *arg,
+                           struct netlink_ext_ack *extack)
 {
        return -ENOSYS;
 }
@@ -665,7 +668,8 @@ static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
        }
 }
 
-static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                      struct netlink_ext_ack *extack)
 {
        struct sfb_sched_data *q = qdisc_priv(sch);
 
index 930e5bd..2f26781 100644 (file)
@@ -721,7 +721,8 @@ static void sfq_destroy(struct Qdisc *sch)
        kfree(q->red_parms);
 }
 
-static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
+static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
        int i;
@@ -730,7 +731,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        q->sch = sch;
        timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
 
-       err = tcf_block_get(&q->block, &q->filter_list, sch);
+       err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;
 
@@ -836,7 +837,8 @@ static void sfq_unbind(struct Qdisc *q, unsigned long cl)
 {
 }
 
-static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
+                                      struct netlink_ext_ack *extack)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
 
index 120f4f3..83e76d0 100644 (file)
@@ -302,7 +302,8 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
        [TCA_TBF_PBURST] = { .type = NLA_U32 },
 };
 
-static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
+static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
 {
        int err;
        struct tbf_sched_data *q = qdisc_priv(sch);
@@ -326,11 +327,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
        qopt = nla_data(tb[TCA_TBF_PARMS]);
        if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
                qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
-                                             tb[TCA_TBF_RTAB]));
+                                             tb[TCA_TBF_RTAB],
+                                             NULL));
 
        if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
                        qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
-                                                     tb[TCA_TBF_PTAB]));
+                                                     tb[TCA_TBF_PTAB],
+                                                     NULL));
 
        buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
        mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
@@ -383,7 +386,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
                if (err)
                        goto done;
        } else if (qopt->limit > 0) {
-               child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
+               child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
+                                        extack);
                if (IS_ERR(child)) {
                        err = PTR_ERR(child);
                        goto done;
@@ -421,19 +425,20 @@ done:
        return err;
 }
 
-static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
+static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
+                   struct netlink_ext_ack *extack)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
 
        qdisc_watchdog_init(&q->watchdog, sch);
        q->qdisc = &noop_qdisc;
 
-       if (opt == NULL)
+       if (!opt)
                return -EINVAL;
 
        q->t_c = ktime_get_ns();
 
-       return tbf_change(sch, opt);
+       return tbf_change(sch, opt, extack);
 }
 
 static void tbf_destroy(struct Qdisc *sch)
@@ -494,7 +499,7 @@ static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-                    struct Qdisc **old)
+                    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
 
index 9fe6b42..93f04cf 100644 (file)
@@ -167,7 +167,8 @@ teql_destroy(struct Qdisc *sch)
        }
 }
 
-static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
+static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
+                          struct netlink_ext_ack *extack)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct teql_master *m = (struct teql_master *)sch->ops;
index d9c04dc..c740b18 100644 (file)
@@ -37,18 +37,6 @@ menuconfig IP_SCTP
 
 if IP_SCTP
 
-config NET_SCTPPROBE
-       tristate "SCTP: Association probing"
-        depends on PROC_FS && KPROBES
-        ---help---
-        This module allows for capturing the changes to SCTP association
-        state in response to incoming packets. It is used for debugging
-        SCTP congestion control algorithms. If you don't understand
-        what was just said, you don't need it: say N.
-
-        To compile this code as a module, choose M here: the
-        module will be called sctp_probe.
-
 config SCTP_DBG_OBJCNT
        bool "SCTP: Debug object counts"
        depends on PROC_FS
index 54bd9c1..6776582 100644 (file)
@@ -4,7 +4,6 @@
 #
 
 obj-$(CONFIG_IP_SCTP) += sctp.o
-obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
 obj-$(CONFIG_INET_SCTP_DIAG) += sctp_diag.o
 
 sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
@@ -16,8 +15,6 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
          offload.o stream_sched.o stream_sched_prio.o \
          stream_sched_rr.o stream_interleave.o
 
-sctp_probe-y := probe.o
-
 sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o
 sctp-$(CONFIG_PROC_FS) += proc.o
 sctp-$(CONFIG_SYSCTL) += sysctl.o
index 3f619fd..291c97b 100644 (file)
@@ -78,6 +78,9 @@ const char *sctp_cname(const union sctp_subtype cid)
        case SCTP_CID_AUTH:
                return "AUTH";
 
+       case SCTP_CID_RECONF:
+               return "RECONF";
+
        default:
                break;
        }
index ee1e601..8b31468 100644 (file)
@@ -232,7 +232,7 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 {
        ep->base.dead = true;
 
-       ep->base.sk->sk_state = SCTP_SS_CLOSED;
+       inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);
 
        /* Unlink this endpoint, so we can't find it again! */
        sctp_unhash_endpoint(ep);
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
deleted file mode 100644 (file)
index 1280f85..0000000
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * sctp_probe - Observe the SCTP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * Modified for SCTP from Stephen Hemminger's code
- * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/sctp.h>
-#include <linux/proc_fs.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/kfifo.h>
-#include <linux/time.h>
-#include <net/net_namespace.h>
-
-#include <net/sctp/sctp.h>
-#include <net/sctp/sm.h>
-
-MODULE_SOFTDEP("pre: sctp");
-MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
-MODULE_DESCRIPTION("SCTP snooper");
-MODULE_LICENSE("GPL");
-
-static int port __read_mostly = 0;
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-static unsigned int fwmark __read_mostly = 0;
-MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
-module_param(fwmark, uint, 0);
-
-static int bufsize __read_mostly = 64 * 1024;
-MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
-module_param(bufsize, int, 0);
-
-static int full __read_mostly = 1;
-MODULE_PARM_DESC(full, "Full log (1=every ack packet received,  0=only cwnd changes)");
-module_param(full, int, 0);
-
-static const char procname[] = "sctpprobe";
-
-static struct {
-       struct kfifo      fifo;
-       spinlock_t        lock;
-       wait_queue_head_t wait;
-       struct timespec64 tstart;
-} sctpw;
-
-static __printf(1, 2) void printl(const char *fmt, ...)
-{
-       va_list args;
-       int len;
-       char tbuf[256];
-
-       va_start(args, fmt);
-       len = vscnprintf(tbuf, sizeof(tbuf), fmt, args);
-       va_end(args);
-
-       kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
-       wake_up(&sctpw.wait);
-}
-
-static int sctpprobe_open(struct inode *inode, struct file *file)
-{
-       kfifo_reset(&sctpw.fifo);
-       ktime_get_ts64(&sctpw.tstart);
-
-       return 0;
-}
-
-static ssize_t sctpprobe_read(struct file *file, char __user *buf,
-                             size_t len, loff_t *ppos)
-{
-       int error = 0, cnt = 0;
-       unsigned char *tbuf;
-
-       if (!buf)
-               return -EINVAL;
-
-       if (len == 0)
-               return 0;
-
-       tbuf = vmalloc(len);
-       if (!tbuf)
-               return -ENOMEM;
-
-       error = wait_event_interruptible(sctpw.wait,
-                                        kfifo_len(&sctpw.fifo) != 0);
-       if (error)
-               goto out_free;
-
-       cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
-       error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
-
-out_free:
-       vfree(tbuf);
-
-       return error ? error : cnt;
-}
-
-static const struct file_operations sctpprobe_fops = {
-       .owner  = THIS_MODULE,
-       .open   = sctpprobe_open,
-       .read   = sctpprobe_read,
-       .llseek = noop_llseek,
-};
-
-static enum sctp_disposition jsctp_sf_eat_sack(
-                                       struct net *net,
-                                       const struct sctp_endpoint *ep,
-                                       const struct sctp_association *asoc,
-                                       const union sctp_subtype type,
-                                       void *arg,
-                                       struct sctp_cmd_seq *commands)
-{
-       struct sctp_chunk *chunk = arg;
-       struct sk_buff *skb = chunk->skb;
-       struct sctp_transport *sp;
-       static __u32 lcwnd = 0;
-       struct timespec64 now;
-
-       sp = asoc->peer.primary_path;
-
-       if (((port == 0 && fwmark == 0) ||
-            asoc->peer.port == port ||
-            ep->base.bind_addr.port == port ||
-            (fwmark > 0 && skb->mark == fwmark)) &&
-           (full || sp->cwnd != lcwnd)) {
-               lcwnd = sp->cwnd;
-
-               ktime_get_ts64(&now);
-               now = timespec64_sub(now, sctpw.tstart);
-
-               printl("%lu.%06lu ", (unsigned long) now.tv_sec,
-                      (unsigned long) now.tv_nsec / NSEC_PER_USEC);
-
-               printl("%p %5d %5d %5d %8d %5d ", asoc,
-                      ep->base.bind_addr.port, asoc->peer.port,
-                      asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data);
-
-               list_for_each_entry(sp, &asoc->peer.transport_addr_list,
-                                       transports) {
-                       if (sp == asoc->peer.primary_path)
-                               printl("*");
-
-                       printl("%pISc %2u %8u %8u %8u %8u %8u ",
-                              &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh,
-                              sp->flight_size, sp->partial_bytes_acked,
-                              sp->pathmtu);
-               }
-               printl("\n");
-       }
-
-       jprobe_return();
-       return 0;
-}
-
-static struct jprobe sctp_recv_probe = {
-       .kp     = {
-               .symbol_name = "sctp_sf_eat_sack_6_2",
-       },
-       .entry  = jsctp_sf_eat_sack,
-};
-
-static __init int sctp_setup_jprobe(void)
-{
-       int ret = register_jprobe(&sctp_recv_probe);
-
-       if (ret) {
-               if (request_module("sctp"))
-                       goto out;
-               ret = register_jprobe(&sctp_recv_probe);
-       }
-
-out:
-       return ret;
-}
-
-static __init int sctpprobe_init(void)
-{
-       int ret = -ENOMEM;
-
-       /* Warning: if the function signature of sctp_sf_eat_sack_6_2,
-        * has been changed, you also have to change the signature of
-        * jsctp_sf_eat_sack, otherwise you end up right here!
-        */
-       BUILD_BUG_ON(__same_type(sctp_sf_eat_sack_6_2,
-                                jsctp_sf_eat_sack) == 0);
-
-       init_waitqueue_head(&sctpw.wait);
-       spin_lock_init(&sctpw.lock);
-       if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL))
-               return ret;
-
-       if (!proc_create(procname, S_IRUSR, init_net.proc_net,
-                        &sctpprobe_fops))
-               goto free_kfifo;
-
-       ret = sctp_setup_jprobe();
-       if (ret)
-               goto remove_proc;
-
-       pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
-               port, fwmark, bufsize);
-       return 0;
-
-remove_proc:
-       remove_proc_entry(procname, init_net.proc_net);
-free_kfifo:
-       kfifo_free(&sctpw.fifo);
-       return ret;
-}
-
-static __exit void sctpprobe_exit(void)
-{
-       kfifo_free(&sctpw.fifo);
-       remove_proc_entry(procname, init_net.proc_net);
-       unregister_jprobe(&sctp_recv_probe);
-}
-
-module_init(sctpprobe_init);
-module_exit(sctpprobe_exit);
index 16ddf2c..b71e7fb 100644 (file)
@@ -878,12 +878,12 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
                 * successfully completed a connect() call.
                 */
                if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
-                       sk->sk_state = SCTP_SS_ESTABLISHED;
+                       inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);
 
                /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
                if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
                    sctp_sstate(sk, ESTABLISHED)) {
-                       sk->sk_state = SCTP_SS_CLOSING;
+                       inet_sk_set_state(sk, SCTP_SS_CLOSING);
                        sk->sk_shutdown |= RCV_SHUTDOWN;
                }
        }
index 541f347..eb7905f 100644 (file)
@@ -59,6 +59,9 @@
 #include <net/sctp/sm.h>
 #include <net/sctp/structs.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/sctp.h>
+
 static struct sctp_packet *sctp_abort_pkt_new(
                                        struct net *net,
                                        const struct sctp_endpoint *ep,
@@ -3219,6 +3222,8 @@ enum sctp_disposition sctp_sf_eat_sack_6_2(struct net *net,
        struct sctp_sackhdr *sackh;
        __u32 ctsn;
 
+       trace_sctp_probe(ep, asoc, chunk);
+
        if (!sctp_vtag_verify(chunk, asoc))
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
index 5e4100d..a5e2150 100644 (file)
@@ -1544,7 +1544,7 @@ static void sctp_close(struct sock *sk, long timeout)
 
        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
        sk->sk_shutdown = SHUTDOWN_MASK;
-       sk->sk_state = SCTP_SS_CLOSING;
+       inet_sk_set_state(sk, SCTP_SS_CLOSING);
 
        ep = sctp_sk(sk)->ep;
 
@@ -4569,7 +4569,7 @@ static int sctp_init_sock(struct sock *sk)
        SCTP_DBG_OBJCNT_INC(sock);
 
        local_bh_disable();
-       percpu_counter_inc(&sctp_sockets_allocated);
+       sk_sockets_allocated_inc(sk);
        sock_prot_inuse_add(net, sk->sk_prot, 1);
 
        /* Nothing can fail after this block, otherwise
@@ -4613,7 +4613,7 @@ static void sctp_destroy_sock(struct sock *sk)
        }
        sctp_endpoint_free(sp->ep);
        local_bh_disable();
-       percpu_counter_dec(&sctp_sockets_allocated);
+       sk_sockets_allocated_dec(sk);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        local_bh_enable();
 }
@@ -4657,7 +4657,7 @@ static void sctp_shutdown(struct sock *sk, int how)
        if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
                struct sctp_association *asoc;
 
-               sk->sk_state = SCTP_SS_CLOSING;
+               inet_sk_set_state(sk, SCTP_SS_CLOSING);
                asoc = list_entry(ep->asocs.next,
                                  struct sctp_association, asocs);
                sctp_primitive_SHUTDOWN(net, asoc, NULL);
@@ -7513,13 +7513,13 @@ static int sctp_listen_start(struct sock *sk, int backlog)
         * sockets.
         *
         */
-       sk->sk_state = SCTP_SS_LISTENING;
+       inet_sk_set_state(sk, SCTP_SS_LISTENING);
        if (!ep->base.bind_addr.port) {
                if (sctp_autobind(sk))
                        return -EAGAIN;
        } else {
                if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
-                       sk->sk_state = SCTP_SS_CLOSED;
+                       inet_sk_set_state(sk, SCTP_SS_CLOSED);
                        return -EADDRINUSE;
                }
        }
@@ -8542,10 +8542,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         * is called, set RCV_SHUTDOWN flag.
         */
        if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
-               newsk->sk_state = SCTP_SS_CLOSED;
+               inet_sk_set_state(newsk, SCTP_SS_CLOSED);
                newsk->sk_shutdown |= RCV_SHUTDOWN;
        } else {
-               newsk->sk_state = SCTP_SS_ESTABLISHED;
+               inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
        }
 
        release_sock(newsk);
index 97fae53..0b42710 100644 (file)
@@ -1093,29 +1093,21 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
 {
-       struct sctp_association *asoc;
-       __u16 needed, freed;
-
-       asoc = ulpq->asoc;
+       struct sctp_association *asoc = ulpq->asoc;
+       __u32 freed = 0;
+       __u16 needed;
 
-       if (chunk) {
-               needed = ntohs(chunk->chunk_hdr->length);
-               needed -= sizeof(struct sctp_data_chunk);
-       } else
-               needed = SCTP_DEFAULT_MAXWINDOW;
-
-       freed = 0;
+       needed = ntohs(chunk->chunk_hdr->length) -
+                sizeof(struct sctp_data_chunk);
 
        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
-               if (freed < needed) {
+               if (freed < needed)
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
-               }
        }
        /* If able to free enough room, accept this chunk. */
-       if (chunk && (freed >= needed)) {
-               int retval;
-               retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+       if (freed >= needed) {
+               int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                /*
                 * Enter partial delivery if chunk has not been
                 * delivered; otherwise, drain the reassembly queue.
index c5fda15..1fdab5c 100644 (file)
@@ -401,7 +401,7 @@ void strp_data_ready(struct strparser *strp)
         * allows a thread in BH context to safely check if the process
         * lock is held. In this case, if the lock is held, queue work.
         */
-       if (sock_owned_by_user(strp->sk)) {
+       if (sock_owned_by_user_nocheck(strp->sk)) {
                queue_work(strp_wq, &strp->work);
                return;
        }
index 47ec121..c800147 100644 (file)
@@ -324,6 +324,7 @@ restart:
        if (res) {
                pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
                        name, -res);
+               kfree(b);
                return -EINVAL;
        }
 
@@ -347,8 +348,10 @@ restart:
        if (skb)
                tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
 
-       if (tipc_mon_create(net, bearer_id))
+       if (tipc_mon_create(net, bearer_id)) {
+               bearer_disable(net, b);
                return -ENOMEM;
+       }
 
        pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
                name,
index 95fec2c..fb7fe97 100644 (file)
@@ -64,7 +64,7 @@ enum mbr_state {
 struct tipc_member {
        struct rb_node tree_node;
        struct list_head list;
-       struct list_head congested;
+       struct list_head small_win;
        struct sk_buff *event_msg;
        struct sk_buff_head deferredq;
        struct tipc_group *group;
@@ -82,7 +82,7 @@ struct tipc_member {
 
 struct tipc_group {
        struct rb_root members;
-       struct list_head congested;
+       struct list_head small_win;
        struct list_head pending;
        struct list_head active;
        struct list_head reclaiming;
@@ -136,12 +136,12 @@ u16 tipc_group_bc_snd_nxt(struct tipc_group *grp)
        return grp->bc_snd_nxt;
 }
 
-static bool tipc_group_is_enabled(struct tipc_member *m)
+static bool tipc_group_is_receiver(struct tipc_member *m)
 {
        return m->state != MBR_QUARANTINED && m->state != MBR_LEAVING;
 }
 
-static bool tipc_group_is_receiver(struct tipc_member *m)
+static bool tipc_group_is_sender(struct tipc_member *m)
 {
        return m && m->state >= MBR_JOINED;
 }
@@ -168,7 +168,7 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid,
        if (!grp)
                return NULL;
        tipc_nlist_init(&grp->dests, tipc_own_addr(net));
-       INIT_LIST_HEAD(&grp->congested);
+       INIT_LIST_HEAD(&grp->small_win);
        INIT_LIST_HEAD(&grp->active);
        INIT_LIST_HEAD(&grp->pending);
        INIT_LIST_HEAD(&grp->reclaiming);
@@ -232,7 +232,7 @@ static struct tipc_member *tipc_group_find_dest(struct tipc_group *grp,
        struct tipc_member *m;
 
        m = tipc_group_find_member(grp, node, port);
-       if (m && tipc_group_is_enabled(m))
+       if (m && tipc_group_is_receiver(m))
                return m;
        return NULL;
 }
@@ -285,7 +285,7 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
        if (!m)
                return NULL;
        INIT_LIST_HEAD(&m->list);
-       INIT_LIST_HEAD(&m->congested);
+       INIT_LIST_HEAD(&m->small_win);
        __skb_queue_head_init(&m->deferredq);
        m->group = grp;
        m->node = node;
@@ -314,7 +314,7 @@ static void tipc_group_delete_member(struct tipc_group *grp,
                grp->bc_ackers--;
 
        list_del_init(&m->list);
-       list_del_init(&m->congested);
+       list_del_init(&m->small_win);
        tipc_group_decr_active(grp, m);
 
        /* If last member on a node, remove node from dest list */
@@ -343,7 +343,7 @@ void tipc_group_update_member(struct tipc_member *m, int len)
        struct tipc_group *grp = m->group;
        struct tipc_member *_m, *tmp;
 
-       if (!tipc_group_is_enabled(m))
+       if (!tipc_group_is_receiver(m))
                return;
 
        m->window -= len;
@@ -351,17 +351,14 @@ void tipc_group_update_member(struct tipc_member *m, int len)
        if (m->window >= ADV_IDLE)
                return;
 
-       if (!list_empty(&m->congested))
-               return;
+       list_del_init(&m->small_win);
 
-       /* Sort member into congested members' list */
-       list_for_each_entry_safe(_m, tmp, &grp->congested, congested) {
-               if (m->window > _m->window)
-                       continue;
-               list_add_tail(&m->congested, &_m->congested);
-               return;
+       /* Sort member into small_window members' list */
+       list_for_each_entry_safe(_m, tmp, &grp->small_win, small_win) {
+               if (_m->window > m->window)
+                       break;
        }
-       list_add_tail(&m->congested, &grp->congested);
+       list_add_tail(&m->small_win, &_m->small_win);
 }
 
 void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
@@ -369,18 +366,20 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
        u16 prev = grp->bc_snd_nxt - 1;
        struct tipc_member *m;
        struct rb_node *n;
+       u16 ackers = 0;
 
        for (n = rb_first(&grp->members); n; n = rb_next(n)) {
                m = container_of(n, struct tipc_member, tree_node);
-               if (tipc_group_is_enabled(m)) {
+               if (tipc_group_is_receiver(m)) {
                        tipc_group_update_member(m, len);
                        m->bc_acked = prev;
+                       ackers++;
                }
        }
 
        /* Mark number of acknowledges to expect, if any */
        if (ack)
-               grp->bc_ackers = grp->member_cnt;
+               grp->bc_ackers = ackers;
        grp->bc_snd_nxt++;
 }
 
@@ -426,10 +425,10 @@ bool tipc_group_bc_cong(struct tipc_group *grp, int len)
        if (grp->bc_ackers)
                return true;
 
-       if (list_empty(&grp->congested))
+       if (list_empty(&grp->small_win))
                return false;
 
-       m = list_first_entry(&grp->congested, struct tipc_member, congested);
+       m = list_first_entry(&grp->small_win, struct tipc_member, small_win);
        if (m->window >= len)
                return false;
 
@@ -484,7 +483,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
                goto drop;
 
        m = tipc_group_find_member(grp, node, port);
-       if (!tipc_group_is_receiver(m))
+       if (!tipc_group_is_sender(m))
                goto drop;
 
        if (less(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
@@ -648,6 +647,7 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
        } else if (mtyp == GRP_REMIT_MSG) {
                msg_set_grp_remitted(hdr, m->window);
        }
+       msg_set_dest_droppable(hdr, true);
        __skb_queue_tail(xmitq, skb);
 }
 
@@ -689,15 +689,16 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                        msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
                        __skb_queue_tail(inputq, m->event_msg);
                }
-               if (m->window < ADV_IDLE)
-                       tipc_group_update_member(m, 0);
-               else
-                       list_del_init(&m->congested);
+               list_del_init(&m->small_win);
+               tipc_group_update_member(m, 0);
                return;
        case GRP_LEAVE_MSG:
                if (!m)
                        return;
                m->bc_syncpt = msg_grp_bc_syncpt(hdr);
+               list_del_init(&m->list);
+               list_del_init(&m->small_win);
+               *usr_wakeup = true;
 
                /* Wait until WITHDRAW event is received */
                if (m->state != MBR_LEAVING) {
@@ -709,8 +710,6 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                ehdr = buf_msg(m->event_msg);
                msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
                __skb_queue_tail(inputq, m->event_msg);
-               *usr_wakeup = true;
-               list_del_init(&m->congested);
                return;
        case GRP_ADV_MSG:
                if (!m)
@@ -718,7 +717,7 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
                m->window += msg_adv_win(hdr);
                *usr_wakeup = m->usr_pending;
                m->usr_pending = false;
-               list_del_init(&m->congested);
+               list_del_init(&m->small_win);
                return;
        case GRP_ACK_MSG:
                if (!m)
@@ -836,10 +835,7 @@ void tipc_group_member_evt(struct tipc_group *grp,
                m->instance = instance;
                TIPC_SKB_CB(skb)->orig_member = m->instance;
                tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
-               if (m->window < ADV_IDLE)
-                       tipc_group_update_member(m, 0);
-               else
-                       list_del_init(&m->congested);
+               tipc_group_update_member(m, 0);
        } else if (event == TIPC_WITHDRAWN) {
                if (!m)
                        goto drop;
@@ -849,20 +845,30 @@ void tipc_group_member_evt(struct tipc_group *grp,
                *usr_wakeup = true;
                m->usr_pending = false;
                node_up = tipc_node_is_up(net, node);
-
-               /* Hold back event if more messages might be expected */
-               if (m->state != MBR_LEAVING && node_up) {
-                       m->event_msg = skb;
-                       tipc_group_decr_active(grp, m);
-                       m->state = MBR_LEAVING;
-               } else {
-                       if (node_up)
+               m->event_msg = NULL;
+
+               if (node_up) {
+                       /* Hold back event if a LEAVE msg should be expected */
+                       if (m->state != MBR_LEAVING) {
+                               m->event_msg = skb;
+                               tipc_group_decr_active(grp, m);
+                               m->state = MBR_LEAVING;
+                       } else {
                                msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-                       else
+                               __skb_queue_tail(inputq, skb);
+                       }
+               } else {
+                       if (m->state != MBR_LEAVING) {
+                               tipc_group_decr_active(grp, m);
+                               m->state = MBR_LEAVING;
                                msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
+                       } else {
+                               msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
+                       }
                        __skb_queue_tail(inputq, skb);
                }
-               list_del_init(&m->congested);
+               list_del_init(&m->list);
+               list_del_init(&m->small_win);
        }
        *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
        return;
index 8e884ed..32dc33a 100644 (file)
@@ -642,9 +642,13 @@ void tipc_mon_delete(struct net *net, int bearer_id)
 {
        struct tipc_net *tn = tipc_net(net);
        struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
-       struct tipc_peer *self = get_self(net, bearer_id);
+       struct tipc_peer *self;
        struct tipc_peer *peer, *tmp;
 
+       if (!mon)
+               return;
+
+       self = get_self(net, bearer_id);
        write_lock_bh(&mon->lock);
        tn->monitors[bearer_id] = NULL;
        list_for_each_entry_safe(peer, tmp, &self->list, list) {
index 0cdf5c2..b51d5cb 100644 (file)
@@ -727,11 +727,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 
        switch (sk->sk_state) {
        case TIPC_ESTABLISHED:
+       case TIPC_CONNECTING:
                if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
                        revents |= POLLOUT;
                /* fall thru' */
        case TIPC_LISTEN:
-       case TIPC_CONNECTING:
                if (!skb_queue_empty(&sk->sk_receive_queue))
                        revents |= POLLIN | POLLRDNORM;
                break;
index d7d6cb0..1d84f91 100644 (file)
@@ -23,27 +23,14 @@ ifneq ($(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR),)
 cfg80211-y += extra-certs.o
 endif
 
-$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509)
+$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
        @$(kecho) "  GEN     $@"
-       @(set -e; \
-         allf=""; \
-         for f in $^ ; do \
-             # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \
-             thisf=$$(od -An -v -tx1 < $$f | \
-                          sed -e 's/ /\n/g' | \
-                          sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \
-                          sed -e 's/^/0x/;s/$$/,/'); \
-             # file should not be empty - maybe command substitution failed? \
-             test ! -z "$$thisf";\
-             allf=$$allf$$thisf;\
-         done; \
-         ( \
-             echo '#include "reg.h"'; \
-             echo 'const u8 shipped_regdb_certs[] = {'; \
-             echo "$$allf"; \
-             echo '};'; \
-             echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
-         ) >> $@)
+       @(echo '#include "reg.h"'; \
+         echo 'const u8 shipped_regdb_certs[] = {'; \
+         cat $^ ; \
+         echo '};'; \
+         echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
+        ) > $@
 
 $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
                      $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509)
@@ -66,4 +53,6 @@ $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
              echo "$$allf"; \
              echo '};'; \
              echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \
-         ) >> $@)
+         ) > $@)
+
+clean-files += shipped-certs.c extra-certs.c
diff --git a/net/wireless/certs/sforshee.hex b/net/wireless/certs/sforshee.hex
new file mode 100644 (file)
index 0000000..14ea666
--- /dev/null
@@ -0,0 +1,86 @@
+/* Seth Forshee's regdb certificate */
+0x30, 0x82, 0x02, 0xa4, 0x30, 0x82, 0x01, 0x8c,
+0x02, 0x09, 0x00, 0xb2, 0x8d, 0xdf, 0x47, 0xae,
+0xf9, 0xce, 0xa7, 0x30, 0x0d, 0x06, 0x09, 0x2a,
+0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b,
+0x05, 0x00, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f,
+0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73,
+0x66, 0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30,
+0x20, 0x17, 0x0d, 0x31, 0x37, 0x31, 0x30, 0x30,
+0x36, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35, 0x5a,
+0x18, 0x0f, 0x32, 0x31, 0x31, 0x37, 0x30, 0x39,
+0x31, 0x32, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35,
+0x5a, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f, 0x06,
+0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73, 0x66,
+0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30, 0x82,
+0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86,
+0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05,
+0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82,
+0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xb5,
+0x40, 0xe3, 0x9c, 0x28, 0x84, 0x39, 0x03, 0xf2,
+0x39, 0xd7, 0x66, 0x2c, 0x41, 0x38, 0x15, 0xac,
+0x7e, 0xa5, 0x83, 0x71, 0x25, 0x7e, 0x90, 0x7c,
+0x68, 0xdd, 0x6f, 0x3f, 0xd9, 0xd7, 0x59, 0x38,
+0x9f, 0x7c, 0x6a, 0x52, 0xc2, 0x03, 0x2a, 0x2d,
+0x7e, 0x66, 0xf4, 0x1e, 0xb3, 0x12, 0x70, 0x20,
+0x5b, 0xd4, 0x97, 0x32, 0x3d, 0x71, 0x8b, 0x3b,
+0x1b, 0x08, 0x17, 0x14, 0x6b, 0x61, 0xc4, 0x57,
+0x8b, 0x96, 0x16, 0x1c, 0xfd, 0x24, 0xd5, 0x0b,
+0x09, 0xf9, 0x68, 0x11, 0x84, 0xfb, 0xca, 0x51,
+0x0c, 0xd1, 0x45, 0x19, 0xda, 0x10, 0x44, 0x8a,
+0xd9, 0xfe, 0x76, 0xa9, 0xfd, 0x60, 0x2d, 0x18,
+0x0b, 0x28, 0x95, 0xb2, 0x2d, 0xea, 0x88, 0x98,
+0xb8, 0xd1, 0x56, 0x21, 0xf0, 0x53, 0x1f, 0xf1,
+0x02, 0x6f, 0xe9, 0x46, 0x9b, 0x93, 0x5f, 0x28,
+0x90, 0x0f, 0xac, 0x36, 0xfa, 0x68, 0x23, 0x71,
+0x57, 0x56, 0xf6, 0xcc, 0xd3, 0xdf, 0x7d, 0x2a,
+0xd9, 0x1b, 0x73, 0x45, 0xeb, 0xba, 0x27, 0x85,
+0xef, 0x7a, 0x7f, 0xa5, 0xcb, 0x80, 0xc7, 0x30,
+0x36, 0xd2, 0x53, 0xee, 0xec, 0xac, 0x1e, 0xe7,
+0x31, 0xf1, 0x36, 0xa2, 0x9c, 0x63, 0xc6, 0x65,
+0x5b, 0x7f, 0x25, 0x75, 0x68, 0xa1, 0xea, 0xd3,
+0x7e, 0x00, 0x5c, 0x9a, 0x5e, 0xd8, 0x20, 0x18,
+0x32, 0x77, 0x07, 0x29, 0x12, 0x66, 0x1e, 0x36,
+0x73, 0xe7, 0x97, 0x04, 0x41, 0x37, 0xb1, 0xb1,
+0x72, 0x2b, 0xf4, 0xa1, 0x29, 0x20, 0x7c, 0x96,
+0x79, 0x0b, 0x2b, 0xd0, 0xd8, 0xde, 0xc8, 0x6c,
+0x3f, 0x93, 0xfb, 0xc5, 0xee, 0x78, 0x52, 0x11,
+0x15, 0x1b, 0x7a, 0xf6, 0xe2, 0x68, 0x99, 0xe7,
+0xfb, 0x46, 0x16, 0x84, 0xe3, 0xc7, 0xa1, 0xe6,
+0xe0, 0xd2, 0x46, 0xd5, 0xe1, 0xc4, 0x5f, 0xa0,
+0x66, 0xf4, 0xda, 0xc4, 0xff, 0x95, 0x1d, 0x02,
+0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09,
+0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
+0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00,
+0x87, 0x03, 0xda, 0xf2, 0x82, 0xc2, 0xdd, 0xaf,
+0x7c, 0x44, 0x2f, 0x86, 0xd3, 0x5f, 0x4c, 0x93,
+0x48, 0xb9, 0xfe, 0x07, 0x17, 0xbb, 0x21, 0xf7,
+0x25, 0x23, 0x4e, 0xaa, 0x22, 0x0c, 0x16, 0xb9,
+0x73, 0xae, 0x9d, 0x46, 0x7c, 0x75, 0xd9, 0xc3,
+0x49, 0x57, 0x47, 0xbf, 0x33, 0xb7, 0x97, 0xec,
+0xf5, 0x40, 0x75, 0xc0, 0x46, 0x22, 0xf0, 0xa0,
+0x5d, 0x9c, 0x79, 0x13, 0xa1, 0xff, 0xb8, 0xa3,
+0x2f, 0x7b, 0x8e, 0x06, 0x3f, 0xc8, 0xb6, 0xe4,
+0x6a, 0x28, 0xf2, 0x34, 0x5c, 0x23, 0x3f, 0x32,
+0xc0, 0xe6, 0xad, 0x0f, 0xac, 0xcf, 0x55, 0x74,
+0x47, 0x73, 0xd3, 0x01, 0x85, 0xb7, 0x0b, 0x22,
+0x56, 0x24, 0x7d, 0x9f, 0x09, 0xa9, 0x0e, 0x86,
+0x9e, 0x37, 0x5b, 0x9c, 0x6d, 0x02, 0xd9, 0x8c,
+0xc8, 0x50, 0x6a, 0xe2, 0x59, 0xf3, 0x16, 0x06,
+0xea, 0xb2, 0x42, 0xb5, 0x58, 0xfe, 0xba, 0xd1,
+0x81, 0x57, 0x1a, 0xef, 0xb2, 0x38, 0x88, 0x58,
+0xf6, 0xaa, 0xc4, 0x2e, 0x8b, 0x5a, 0x27, 0xe4,
+0xa5, 0xe8, 0xa4, 0xca, 0x67, 0x5c, 0xac, 0x72,
+0x67, 0xc3, 0x6f, 0x13, 0xc3, 0x2d, 0x35, 0x79,
+0xd7, 0x8a, 0xe7, 0xf5, 0xd4, 0x21, 0x30, 0x4a,
+0xd5, 0xf6, 0xa3, 0xd9, 0x79, 0x56, 0xf2, 0x0f,
+0x10, 0xf7, 0x7d, 0xd0, 0x51, 0x93, 0x2f, 0x47,
+0xf8, 0x7d, 0x4b, 0x0a, 0x84, 0x55, 0x12, 0x0a,
+0x7d, 0x4e, 0x3b, 0x1f, 0x2b, 0x2f, 0xfc, 0x28,
+0xb3, 0x69, 0x34, 0xe1, 0x80, 0x80, 0xbb, 0xe2,
+0xaf, 0xb9, 0xd6, 0x30, 0xf1, 0x1d, 0x54, 0x87,
+0x23, 0x99, 0x9f, 0x51, 0x03, 0x4c, 0x45, 0x7d,
+0x02, 0x65, 0x73, 0xab, 0xfd, 0xcf, 0x94, 0xcc,
+0x0d, 0x3a, 0x60, 0xfd, 0x3c, 0x14, 0x2f, 0x16,
+0x33, 0xa9, 0x21, 0x1f, 0xcb, 0x50, 0xb1, 0x8f,
+0x03, 0xee, 0xa0, 0x66, 0xa9, 0x16, 0x79, 0x14,
diff --git a/net/wireless/certs/sforshee.x509 b/net/wireless/certs/sforshee.x509
deleted file mode 100644 (file)
index c6f8f9d..0000000
Binary files a/net/wireless/certs/sforshee.x509 and /dev/null differ
index 413d4f4..a1d1099 100644 (file)
@@ -126,6 +126,11 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
        wdev->ibss_fixed = params->channel_fixed;
        wdev->ibss_dfs_possible = params->userspace_handles_dfs;
        wdev->chandef = params->chandef;
+       if (connkeys) {
+               params->wep_keys = connkeys->params;
+               params->wep_tx_key = connkeys->def;
+       }
+
 #ifdef CONFIG_CFG80211_WEXT
        wdev->wext.ibss.chandef = params->chandef;
 #endif
index e7c64a8..bbb9907 100644 (file)
@@ -692,7 +692,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
        return rdev_mgmt_tx(rdev, wdev, params, cookie);
 }
 
-bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
+bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
                      const u8 *buf, size_t len, u32 flags)
 {
        struct wiphy *wiphy = wdev->wiphy;
@@ -708,7 +708,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
                cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
        u16 stype;
 
-       trace_cfg80211_rx_mgmt(wdev, freq, sig_mbm);
+       trace_cfg80211_rx_mgmt(wdev, freq, sig_dbm);
        stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
 
        if (!(stypes->rx & BIT(stype))) {
@@ -735,7 +735,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
 
                /* Indicate the received Action frame to user space */
                if (nl80211_send_mgmt(rdev, wdev, reg->nlportid,
-                                     freq, sig_mbm,
+                                     freq, sig_dbm,
                                      buf, len, flags, GFP_ATOMIC))
                        continue;
 
index b1ac23c..79a9ff6 100644 (file)
@@ -734,11 +734,12 @@ struct key_parse {
        bool def_uni, def_multi;
 };
 
-static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
+static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key,
+                                struct key_parse *k)
 {
        struct nlattr *tb[NL80211_KEY_MAX + 1];
        int err = nla_parse_nested(tb, NL80211_KEY_MAX, key,
-                                  nl80211_key_policy, NULL);
+                                  nl80211_key_policy, info->extack);
        if (err)
                return err;
 
@@ -771,7 +772,8 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
        if (tb[NL80211_KEY_TYPE]) {
                k->type = nla_get_u32(tb[NL80211_KEY_TYPE]);
                if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
-                       return -EINVAL;
+                       return genl_err_attr(info, -EINVAL,
+                                            tb[NL80211_KEY_TYPE]);
        }
 
        if (tb[NL80211_KEY_DEFAULT_TYPES]) {
@@ -779,7 +781,8 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
 
                err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
                                       tb[NL80211_KEY_DEFAULT_TYPES],
-                                      nl80211_key_default_policy, NULL);
+                                      nl80211_key_default_policy,
+                                      info->extack);
                if (err)
                        return err;
 
@@ -820,8 +823,10 @@ static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k)
 
        if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
                k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
-               if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
+               if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) {
+                       GENL_SET_ERR_MSG(info, "key type out of range");
                        return -EINVAL;
+               }
        }
 
        if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) {
@@ -850,31 +855,42 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
        k->type = -1;
 
        if (info->attrs[NL80211_ATTR_KEY])
-               err = nl80211_parse_key_new(info->attrs[NL80211_ATTR_KEY], k);
+               err = nl80211_parse_key_new(info, info->attrs[NL80211_ATTR_KEY], k);
        else
                err = nl80211_parse_key_old(info, k);
 
        if (err)
                return err;
 
-       if (k->def && k->defmgmt)
+       if (k->def && k->defmgmt) {
+               GENL_SET_ERR_MSG(info, "key with def && defmgmt is invalid");
                return -EINVAL;
+       }
 
        if (k->defmgmt) {
-               if (k->def_uni || !k->def_multi)
+               if (k->def_uni || !k->def_multi) {
+                       GENL_SET_ERR_MSG(info, "defmgmt key must be mcast");
                        return -EINVAL;
+               }
        }
 
        if (k->idx != -1) {
                if (k->defmgmt) {
-                       if (k->idx < 4 || k->idx > 5)
+                       if (k->idx < 4 || k->idx > 5) {
+                               GENL_SET_ERR_MSG(info,
+                                                "defmgmt key idx not 4 or 5");
                                return -EINVAL;
+                       }
                } else if (k->def) {
-                       if (k->idx < 0 || k->idx > 3)
+                       if (k->idx < 0 || k->idx > 3) {
+                               GENL_SET_ERR_MSG(info, "def key idx not 0-3");
                                return -EINVAL;
+                       }
                } else {
-                       if (k->idx < 0 || k->idx > 5)
+                       if (k->idx < 0 || k->idx > 5) {
+                               GENL_SET_ERR_MSG(info, "key idx not 0-5");
                                return -EINVAL;
+                       }
                }
        }
 
@@ -883,8 +899,9 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
 
 static struct cfg80211_cached_keys *
 nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
-                      struct nlattr *keys, bool *no_ht)
+                      struct genl_info *info, bool *no_ht)
 {
+       struct nlattr *keys = info->attrs[NL80211_ATTR_KEYS];
        struct key_parse parse;
        struct nlattr *key;
        struct cfg80211_cached_keys *result;
@@ -909,17 +926,22 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
                memset(&parse, 0, sizeof(parse));
                parse.idx = -1;
 
-               err = nl80211_parse_key_new(key, &parse);
+               err = nl80211_parse_key_new(info, key, &parse);
                if (err)
                        goto error;
                err = -EINVAL;
                if (!parse.p.key)
                        goto error;
-               if (parse.idx < 0 || parse.idx > 3)
+               if (parse.idx < 0 || parse.idx > 3) {
+                       GENL_SET_ERR_MSG(info, "key index out of range [0-3]");
                        goto error;
+               }
                if (parse.def) {
-                       if (def)
+                       if (def) {
+                               GENL_SET_ERR_MSG(info,
+                                                "only one key can be default");
                                goto error;
+                       }
                        def = 1;
                        result->def = parse.idx;
                        if (!parse.def_uni || !parse.def_multi)
@@ -932,6 +954,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
                        goto error;
                if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 &&
                    parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) {
+                       GENL_SET_ERR_MSG(info, "connect key must be WEP");
                        err = -EINVAL;
                        goto error;
                }
@@ -947,6 +970,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
 
        if (result->def < 0) {
                err = -EINVAL;
+               GENL_SET_ERR_MSG(info, "need a default/TX key");
                goto error;
        }
 
@@ -2610,7 +2634,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
        case NL80211_IFTYPE_AP:
                if (wdev->ssid_len &&
                    nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
-                       goto nla_put_failure;
+                       goto nla_put_failure_locked;
                break;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
@@ -2623,7 +2647,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
                if (!ssid_ie)
                        break;
                if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
-                       goto nla_put_failure;
+                       goto nla_put_failure_locked;
                break;
                }
        default:
@@ -2635,6 +2659,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
        genlmsg_end(msg, hdr);
        return 0;
 
+ nla_put_failure_locked:
+       wdev_unlock(wdev);
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
@@ -7815,6 +7841,11 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
                              intbss->ts_boottime, NL80211_BSS_PAD))
                goto nla_put_failure;
 
+       if (!nl80211_put_signal(msg, intbss->pub.chains,
+                               intbss->pub.chain_signal,
+                               NL80211_BSS_CHAIN_SIGNAL))
+               goto nla_put_failure;
+
        switch (rdev->wiphy.signal_type) {
        case CFG80211_SIGNAL_TYPE_MBM:
                if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
@@ -8611,9 +8642,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
        if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
                bool no_ht = false;
 
-               connkeys = nl80211_parse_connkeys(rdev,
-                                         info->attrs[NL80211_ATTR_KEYS],
-                                         &no_ht);
+               connkeys = nl80211_parse_connkeys(rdev, info, &no_ht);
                if (IS_ERR(connkeys))
                        return PTR_ERR(connkeys);
 
@@ -9017,8 +9046,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
-               connkeys = nl80211_parse_connkeys(rdev,
-                                         info->attrs[NL80211_ATTR_KEYS], NULL);
+               connkeys = nl80211_parse_connkeys(rdev, info, NULL);
                if (IS_ERR(connkeys))
                        return PTR_ERR(connkeys);
        }
@@ -13942,7 +13970,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
 
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
-           (from_ap && reason &&
+           (reason &&
             nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) ||
            (from_ap &&
             nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) ||
index f6c5fe4..d36c3eb 100644 (file)
@@ -981,6 +981,9 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
                found->ts = tmp->ts;
                found->ts_boottime = tmp->ts_boottime;
                found->parent_tsf = tmp->parent_tsf;
+               found->pub.chains = tmp->pub.chains;
+               memcpy(found->pub.chain_signal, tmp->pub.chain_signal,
+                      IEEE80211_MAX_CHAINS);
                ether_addr_copy(found->parent_bssid, tmp->parent_bssid);
        } else {
                struct cfg80211_internal_bss *new;
@@ -1233,6 +1236,8 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
        tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
        tmp.ts_boottime = data->boottime_ns;
        tmp.parent_tsf = data->parent_tsf;
+       tmp.pub.chains = data->chains;
+       memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
        ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
 
        signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
index f3353fe..bcfedd3 100644 (file)
@@ -2544,20 +2544,20 @@ DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_del_sta,
 );
 
 TRACE_EVENT(cfg80211_rx_mgmt,
-       TP_PROTO(struct wireless_dev *wdev, int freq, int sig_mbm),
-       TP_ARGS(wdev, freq, sig_mbm),
+       TP_PROTO(struct wireless_dev *wdev, int freq, int sig_dbm),
+       TP_ARGS(wdev, freq, sig_dbm),
        TP_STRUCT__entry(
                WDEV_ENTRY
                __field(int, freq)
-               __field(int, sig_mbm)
+               __field(int, sig_dbm)
        ),
        TP_fast_assign(
                WDEV_ASSIGN;
                __entry->freq = freq;
-               __entry->sig_mbm = sig_mbm;
+               __entry->sig_dbm = sig_dbm;
        ),
-       TP_printk(WDEV_PR_FMT ", freq: %d, sig mbm: %d",
-                 WDEV_PR_ARG, __entry->freq, __entry->sig_mbm)
+       TP_printk(WDEV_PR_FMT ", freq: %d, sig dbm: %d",
+                 WDEV_PR_ARG, __entry->freq, __entry->sig_dbm)
 );
 
 TRACE_EVENT(cfg80211_mgmt_tx_status,
index 00641b6..7598250 100644 (file)
 #include <linux/notifier.h>
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
 {
        int err;
+       unsigned long flags;
        struct xfrm_state *x;
+       struct sk_buff *skb2;
+       struct softnet_data *sd;
+       netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
 
-       if (skb_is_gso(skb))
-               return 0;
+       if (!xo)
+               return skb;
 
-       if (xo) {
-               x = skb->sp->xvec[skb->sp->len - 1];
-               if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
-                       return 0;
+       if (!(features & NETIF_F_HW_ESP))
+               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+       x = skb->sp->xvec[skb->sp->len - 1];
+       if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+               return skb;
+
+       local_irq_save(flags);
+       sd = this_cpu_ptr(&softnet_data);
+       err = !skb_queue_empty(&sd->xfrm_backlog);
+       local_irq_restore(flags);
 
+       if (err) {
+               *again = true;
+               return skb;
+       }
+
+       if (skb_is_gso(skb)) {
+               struct net_device *dev = skb->dev;
+
+               if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+                       struct sk_buff *segs;
+
+                       /* Packet got rerouted, fixup features and segment it. */
+                       esp_features = esp_features & ~(NETIF_F_HW_ESP
+                                                       | NETIF_F_GSO_ESP);
+
+                       segs = skb_gso_segment(skb, esp_features);
+                       if (IS_ERR(segs)) {
+                               kfree_skb(skb);
+                               atomic_long_inc(&dev->tx_dropped);
+                               return NULL;
+                       } else {
+                               consume_skb(skb);
+                               skb = segs;
+                       }
+               }
+       }
+
+       if (!skb->next) {
                x->outer_mode->xmit(x, skb);
 
-               err = x->type_offload->xmit(x, skb, features);
+               xo->flags |= XFRM_DEV_RESUME;
+
+               err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
+                       if (err == -EINPROGRESS)
+                               return NULL;
+
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
-                       return err;
+                       kfree_skb(skb);
+                       return NULL;
                }
 
                skb_push(skb, skb->data - skb_mac_header(skb));
+
+               return skb;
        }
 
-       return 0;
+       skb2 = skb;
+
+       do {
+               struct sk_buff *nskb = skb2->next;
+               skb2->next = NULL;
+
+               xo = xfrm_offload(skb2);
+               xo->flags |= XFRM_DEV_RESUME;
+
+               x->outer_mode->xmit(x, skb2);
+
+               err = x->type_offload->xmit(x, skb2, esp_features);
+               if (!err) {
+                       skb2->next = nskb;
+               } else if (err != -EINPROGRESS) {
+                       XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+                       skb2->next = nskb;
+                       kfree_skb_list(skb2);
+                       return NULL;
+               } else {
+                       if (skb == skb2)
+                               skb = nskb;
+
+                       if (!skb)
+                               return NULL;
+
+                       goto skip_push;
+               }
+
+               skb_push(skb2, skb2->data - skb_mac_header(skb2));
+
+skip_push:
+               skb2 = nskb;
+       } while (skb2);
+
+       return skb;
 }
 EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
 
@@ -120,8 +202,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
        if (!x->type_offload || x->encap)
                return false;
 
-       if ((x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev)) &&
-            !xdst->child->xfrm && x->type->get_mtu) {
+       if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
+            (!xdst->child->xfrm && x->type->get_mtu)) {
                mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
 
                if (skb->len <= mtu)
@@ -140,19 +222,82 @@ ok:
        return true;
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+
+void xfrm_dev_resume(struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       int ret = NETDEV_TX_BUSY;
+       struct netdev_queue *txq;
+       struct softnet_data *sd;
+       unsigned long flags;
+
+       rcu_read_lock();
+       txq = netdev_pick_tx(dev, skb, NULL);
+
+       HARD_TX_LOCK(dev, txq, smp_processor_id());
+       if (!netif_xmit_frozen_or_stopped(txq))
+               skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+       HARD_TX_UNLOCK(dev, txq);
+
+       if (!dev_xmit_complete(ret)) {
+               local_irq_save(flags);
+               sd = this_cpu_ptr(&softnet_data);
+               skb_queue_tail(&sd->xfrm_backlog, skb);
+               raise_softirq_irqoff(NET_TX_SOFTIRQ);
+               local_irq_restore(flags);
+       }
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_resume);
+
+void xfrm_dev_backlog(struct softnet_data *sd)
+{
+       struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
+       struct sk_buff_head list;
+       struct sk_buff *skb;
+
+       if (skb_queue_empty(xfrm_backlog))
+               return;
+
+       __skb_queue_head_init(&list);
+
+       spin_lock(&xfrm_backlog->lock);
+       skb_queue_splice_init(xfrm_backlog, &list);
+       spin_unlock(&xfrm_backlog->lock);
+
+       while (!skb_queue_empty(&list)) {
+               skb = __skb_dequeue(&list);
+               xfrm_dev_resume(skb);
+       }
+
+}
 #endif
 
-static int xfrm_dev_register(struct net_device *dev)
+static int xfrm_api_check(struct net_device *dev)
 {
-       if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-               return NOTIFY_BAD;
+#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;
 
+       if ((dev->features & NETIF_F_HW_ESP) &&
+           (!(dev->xfrmdev_ops &&
+              dev->xfrmdev_ops->xdo_dev_state_add &&
+              dev->xfrmdev_ops->xdo_dev_state_delete)))
+               return NOTIFY_BAD;
+#else
+       if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
+               return NOTIFY_BAD;
+#endif
+
        return NOTIFY_DONE;
 }
 
+static int xfrm_dev_register(struct net_device *dev)
+{
+       return xfrm_api_check(dev);
+}
+
 static int xfrm_dev_unregister(struct net_device *dev)
 {
        xfrm_policy_cache_flush();
@@ -161,16 +306,7 @@ static int xfrm_dev_unregister(struct net_device *dev)
 
 static int xfrm_dev_feat_change(struct net_device *dev)
 {
-       if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-               return NOTIFY_BAD;
-       else if (!(dev->features & NETIF_F_HW_ESP))
-               dev->xfrmdev_ops = NULL;
-
-       if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
-           !(dev->features & NETIF_F_HW_ESP))
-               return NOTIFY_BAD;
-
-       return NOTIFY_DONE;
+       return xfrm_api_check(dev);
 }
 
 static int xfrm_dev_down(struct net_device *dev)
index ac277b9..26b10eb 100644 (file)
@@ -8,15 +8,29 @@
  *
  */
 
+#include <linux/bottom_half.h>
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <linux/percpu.h>
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/ip_tunnels.h>
 #include <net/ip6_tunnel.h>
 
+struct xfrm_trans_tasklet {
+       struct tasklet_struct tasklet;
+       struct sk_buff_head queue;
+};
+
+struct xfrm_trans_cb {
+       int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
+};
+
+#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
+
 static struct kmem_cache *secpath_cachep __read_mostly;
 
 static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
@@ -25,6 +39,8 @@ static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];
 static struct gro_cells gro_cells;
 static struct net_device xfrm_napi_dev;
 
+static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);
+
 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
 {
        int err = 0;
@@ -207,7 +223,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        xfrm_address_t *daddr;
        struct xfrm_mode *inner_mode;
        u32 mark = skb->mark;
-       unsigned int family;
+       unsigned int family = AF_UNSPEC;
        int decaps = 0;
        int async = 0;
        bool xfrm_gro = false;
@@ -216,6 +232,16 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
        if (encap_type < 0) {
                x = xfrm_input_state(skb);
+
+               if (unlikely(x->km.state != XFRM_STATE_VALID)) {
+                       if (x->km.state == XFRM_STATE_ACQ)
+                               XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
+                       else
+                               XFRM_INC_STATS(net,
+                                              LINUX_MIB_XFRMINSTATEINVALID);
+                       goto drop;
+               }
+
                family = x->outer_mode->afinfo->family;
 
                /* An encap_type of -1 indicates async resumption. */
@@ -466,9 +492,41 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
 }
 EXPORT_SYMBOL(xfrm_input_resume);
 
+static void xfrm_trans_reinject(unsigned long data)
+{
+       struct xfrm_trans_tasklet *trans = (void *)data;
+       struct sk_buff_head queue;
+       struct sk_buff *skb;
+
+       __skb_queue_head_init(&queue);
+       skb_queue_splice_init(&trans->queue, &queue);
+
+       while ((skb = __skb_dequeue(&queue)))
+               XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
+}
+
+int xfrm_trans_queue(struct sk_buff *skb,
+                    int (*finish)(struct net *, struct sock *,
+                                  struct sk_buff *))
+{
+       struct xfrm_trans_tasklet *trans;
+
+       trans = this_cpu_ptr(&xfrm_trans_tasklet);
+
+       if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
+               return -ENOBUFS;
+
+       XFRM_TRANS_SKB_CB(skb)->finish = finish;
+       skb_queue_tail(&trans->queue, skb);
+       tasklet_schedule(&trans->tasklet);
+       return 0;
+}
+EXPORT_SYMBOL(xfrm_trans_queue);
+
 void __init xfrm_input_init(void)
 {
        int err;
+       int i;
 
        init_dummy_netdev(&xfrm_napi_dev);
        err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
@@ -479,4 +537,13 @@ void __init xfrm_input_init(void)
                                           sizeof(struct sec_path),
                                           0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                           NULL);
+
+       for_each_possible_cpu(i) {
+               struct xfrm_trans_tasklet *trans;
+
+               trans = &per_cpu(xfrm_trans_tasklet, i);
+               __skb_queue_head_init(&trans->queue);
+               tasklet_init(&trans->tasklet, xfrm_trans_reinject,
+                            (unsigned long)trans);
+       }
 }
index e3a5aca..d8a8129 100644 (file)
@@ -1168,9 +1168,15 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
  again:
        pol = rcu_dereference(sk->sk_policy[dir]);
        if (pol != NULL) {
-               bool match = xfrm_selector_match(&pol->selector, fl, family);
+               bool match;
                int err = 0;
 
+               if (pol->family != family) {
+                       pol = NULL;
+                       goto out;
+               }
+
+               match = xfrm_selector_match(&pol->selector, fl, family);
                if (match) {
                        if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
                                pol = NULL;
@@ -1835,6 +1841,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
                   sizeof(struct xfrm_policy *) * num_pols) == 0 &&
            xfrm_xdst_can_reuse(xdst, xfrm, err)) {
                dst_hold(&xdst->u.dst);
+               xfrm_pols_put(pols, num_pols);
                while (err > 0)
                        xfrm_state_put(xfrm[--err]);
                return xdst;
index 1b7856b..cc4c519 100644 (file)
@@ -1343,6 +1343,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
 
        if (orig->aead) {
                x->aead = xfrm_algo_aead_clone(orig->aead);
+               x->geniv = orig->geniv;
                if (!x->aead)
                        goto error;
        }
index 983b023..bdb48e5 100644 (file)
@@ -1419,11 +1419,14 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
 
 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 {
+       u16 prev_family;
        int i;
 
        if (nr > XFRM_MAX_DEPTH)
                return -EINVAL;
 
+       prev_family = family;
+
        for (i = 0; i < nr; i++) {
                /* We never validated the ut->family value, so many
                 * applications simply leave it at zero.  The check was
@@ -1435,6 +1438,12 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                if (!ut[i].family)
                        ut[i].family = family;
 
+               if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
+                   (ut[i].family != prev_family))
+                       return -EINVAL;
+
+               prev_family = ut[i].family;
+
                switch (ut[i].family) {
                case AF_INET:
                        break;
@@ -1445,6 +1454,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                default:
                        return -EINVAL;
                }
+
+               switch (ut[i].id.proto) {
+               case IPPROTO_AH:
+               case IPPROTO_ESP:
+               case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+               case IPPROTO_ROUTING:
+               case IPPROTO_DSTOPTS:
+#endif
+               case IPSEC_PROTO_ANY:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
        }
 
        return 0;
@@ -2470,7 +2494,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
        [XFRMA_PROTO]           = { .type = NLA_U8 },
        [XFRMA_ADDRESS_FILTER]  = { .len = sizeof(struct xfrm_address_filter) },
        [XFRMA_OFFLOAD_DEV]     = { .len = sizeof(struct xfrm_user_offload) },
-       [XFRMA_OUTPUT_MARK]     = { .len = NLA_U32 },
+       [XFRMA_OUTPUT_MARK]     = { .type = NLA_U32 },
 };
 
 static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
index 4fb944a..3ff7a05 100644 (file)
@@ -41,6 +41,7 @@ hostprogs-y += xdp_redirect
 hostprogs-y += xdp_redirect_map
 hostprogs-y += xdp_redirect_cpu
 hostprogs-y += xdp_monitor
+hostprogs-y += xdp_rxq_info
 hostprogs-y += syscall_tp
 
 # Libbpf dependencies
@@ -90,6 +91,7 @@ xdp_redirect-objs := bpf_load.o $(LIBBPF) xdp_redirect_user.o
 xdp_redirect_map-objs := bpf_load.o $(LIBBPF) xdp_redirect_map_user.o
 xdp_redirect_cpu-objs := bpf_load.o $(LIBBPF) xdp_redirect_cpu_user.o
 xdp_monitor-objs := bpf_load.o $(LIBBPF) xdp_monitor_user.o
+xdp_rxq_info-objs := bpf_load.o $(LIBBPF) xdp_rxq_info_user.o
 syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o
 
 # Tell kbuild to always build the programs
@@ -139,6 +141,7 @@ always += xdp_redirect_kern.o
 always += xdp_redirect_map_kern.o
 always += xdp_redirect_cpu_kern.o
 always += xdp_monitor_kern.o
+always += xdp_rxq_info_kern.o
 always += syscall_tp_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
@@ -182,6 +185,7 @@ HOSTLOADLIBES_xdp_redirect += -lelf
 HOSTLOADLIBES_xdp_redirect_map += -lelf
 HOSTLOADLIBES_xdp_redirect_cpu += -lelf
 HOSTLOADLIBES_xdp_monitor += -lelf
+HOSTLOADLIBES_xdp_rxq_info += -lelf
 HOSTLOADLIBES_syscall_tp += -lelf
 
 # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c
new file mode 100644 (file)
index 0000000..3fd2092
--- /dev/null
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ *
+ *  Example howto extract XDP RX-queue info
+ */
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* Config setup from with userspace
+ *
+ * User-side setup ifindex in config_map, to verify that
+ * ctx->ingress_ifindex is correct (against configured ifindex)
+ */
+struct config {
+       __u32 action;
+       int ifindex;
+};
+struct bpf_map_def SEC("maps") config_map = {
+       .type           = BPF_MAP_TYPE_ARRAY,
+       .key_size       = sizeof(int),
+       .value_size     = sizeof(struct config),
+       .max_entries    = 1,
+};
+
+/* Common stats data record (shared with userspace) */
+struct datarec {
+       __u64 processed;
+       __u64 issue;
+};
+
+struct bpf_map_def SEC("maps") stats_global_map = {
+       .type           = BPF_MAP_TYPE_PERCPU_ARRAY,
+       .key_size       = sizeof(u32),
+       .value_size     = sizeof(struct datarec),
+       .max_entries    = 1,
+};
+
+#define MAX_RXQs 64
+
+/* Stats per rx_queue_index (per CPU) */
+struct bpf_map_def SEC("maps") rx_queue_index_map = {
+       .type           = BPF_MAP_TYPE_PERCPU_ARRAY,
+       .key_size       = sizeof(u32),
+       .value_size     = sizeof(struct datarec),
+       .max_entries    = MAX_RXQs + 1,
+};
+
+SEC("xdp_prog0")
+int  xdp_prognum0(struct xdp_md *ctx)
+{
+       void *data_end = (void *)(long)ctx->data_end;
+       void *data     = (void *)(long)ctx->data;
+       struct datarec *rec, *rxq_rec;
+       int ingress_ifindex;
+       struct config *config;
+       u32 key = 0;
+
+       /* Global stats record */
+       rec = bpf_map_lookup_elem(&stats_global_map, &key);
+       if (!rec)
+               return XDP_ABORTED;
+       rec->processed++;
+
+       /* Accessing ctx->ingress_ifindex, cause BPF to rewrite BPF
+        * instructions inside kernel to access xdp_rxq->dev->ifindex
+        */
+       ingress_ifindex = ctx->ingress_ifindex;
+
+       config = bpf_map_lookup_elem(&config_map, &key);
+       if (!config)
+               return XDP_ABORTED;
+
+       /* Simple test: check ctx provided ifindex is as expected */
+       if (ingress_ifindex != config->ifindex) {
+               /* count this error case */
+               rec->issue++;
+               return XDP_ABORTED;
+       }
+
+       /* Update stats per rx_queue_index. Handle if rx_queue_index
+        * is larger than stats map can contain info for.
+        */
+       key = ctx->rx_queue_index;
+       if (key >= MAX_RXQs)
+               key = MAX_RXQs;
+       rxq_rec = bpf_map_lookup_elem(&rx_queue_index_map, &key);
+       if (!rxq_rec)
+               return XDP_ABORTED;
+       rxq_rec->processed++;
+       if (key == MAX_RXQs)
+               rxq_rec->issue++;
+
+       return config->action;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
new file mode 100644 (file)
index 0000000..32430e8
--- /dev/null
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ */
+static const char *__doc__ = " XDP RX-queue info extract example\n\n"
+       "Monitor how many packets per sec (pps) are received\n"
+       "per NIC RX queue index and which CPU processed the packet\n"
+       ;
+
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <locale.h>
+#include <sys/resource.h>
+#include <getopt.h>
+#include <net/if.h>
+#include <time.h>
+
+#include <arpa/inet.h>
+#include <linux/if_link.h>
+
+#include "libbpf.h"
+#include "bpf_load.h"
+#include "bpf_util.h"
+
+static int ifindex = -1;
+static char ifname_buf[IF_NAMESIZE];
+static char *ifname;
+
+static __u32 xdp_flags;
+
+/* Exit return codes */
+#define EXIT_OK                0
+#define EXIT_FAIL              1
+#define EXIT_FAIL_OPTION       2
+#define EXIT_FAIL_XDP          3
+#define EXIT_FAIL_BPF          4
+#define EXIT_FAIL_MEM          5
+
+static const struct option long_options[] = {
+       {"help",        no_argument,            NULL, 'h' },
+       {"dev",         required_argument,      NULL, 'd' },
+       {"skb-mode",    no_argument,            NULL, 'S' },
+       {"sec",         required_argument,      NULL, 's' },
+       {"no-separators", no_argument,          NULL, 'z' },
+       {"action",      required_argument,      NULL, 'a' },
+       {0, 0, NULL,  0 }
+};
+
+static void int_exit(int sig)
+{
+       fprintf(stderr,
+               "Interrupted: Removing XDP program on ifindex:%d device:%s\n",
+               ifindex, ifname);
+       if (ifindex > -1)
+               set_link_xdp_fd(ifindex, -1, xdp_flags);
+       exit(EXIT_OK);
+}
+
+struct config {
+       __u32 action;
+       int ifindex;
+};
+#define XDP_ACTION_MAX (XDP_TX + 1)
+#define XDP_ACTION_MAX_STRLEN 11
+static const char *xdp_action_names[XDP_ACTION_MAX] = {
+       [XDP_ABORTED]   = "XDP_ABORTED",
+       [XDP_DROP]      = "XDP_DROP",
+       [XDP_PASS]      = "XDP_PASS",
+       [XDP_TX]        = "XDP_TX",
+};
+
+static const char *action2str(int action)
+{
+       if (action < XDP_ACTION_MAX)
+               return xdp_action_names[action];
+       return NULL;
+}
+
+static int parse_xdp_action(char *action_str)
+{
+       size_t maxlen;
+       __u64 action = -1;
+       int i;
+
+       for (i = 0; i < XDP_ACTION_MAX; i++) {
+               maxlen = XDP_ACTION_MAX_STRLEN;
+               if (strncmp(xdp_action_names[i], action_str, maxlen) == 0) {
+                       action = i;
+                       break;
+               }
+       }
+       return action;
+}
+
+static void list_xdp_actions(void)
+{
+       int i;
+
+       printf("Available XDP --action <options>\n");
+       for (i = 0; i < XDP_ACTION_MAX; i++)
+               printf("\t%s\n", xdp_action_names[i]);
+       printf("\n");
+}
+
+static void usage(char *argv[])
+{
+       int i;
+
+       printf("\nDOCUMENTATION:\n%s\n", __doc__);
+       printf(" Usage: %s (options-see-below)\n", argv[0]);
+       printf(" Listing options:\n");
+       for (i = 0; long_options[i].name != 0; i++) {
+               printf(" --%-12s", long_options[i].name);
+               if (long_options[i].flag != NULL)
+                       printf(" flag (internal value:%d)",
+                               *long_options[i].flag);
+               else
+                       printf(" short-option: -%c",
+                               long_options[i].val);
+               printf("\n");
+       }
+       printf("\n");
+       list_xdp_actions();
+}
+
+#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
+static __u64 gettime(void)
+{
+       struct timespec t;
+       int res;
+
+       res = clock_gettime(CLOCK_MONOTONIC, &t);
+       if (res < 0) {
+               fprintf(stderr, "Error with gettimeofday! (%i)\n", res);
+               exit(EXIT_FAIL);
+       }
+       return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
+}
+
+/* Common stats data record shared with _kern.c */
+struct datarec {
+       __u64 processed;
+       __u64 issue;
+};
+struct record {
+       __u64 timestamp;
+       struct datarec total;
+       struct datarec *cpu;
+};
+struct stats_record {
+       struct record stats;
+       struct record *rxq;
+};
+
+static struct datarec *alloc_record_per_cpu(void)
+{
+       unsigned int nr_cpus = bpf_num_possible_cpus();
+       struct datarec *array;
+       size_t size;
+
+       size = sizeof(struct datarec) * nr_cpus;
+       array = malloc(size);
+       memset(array, 0, size);
+       if (!array) {
+               fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
+               exit(EXIT_FAIL_MEM);
+       }
+       return array;
+}
+
+static struct record *alloc_record_per_rxq(void)
+{
+       unsigned int nr_rxqs = map_data[2].def.max_entries;
+       struct record *array;
+       size_t size;
+
+       size = sizeof(struct record) * nr_rxqs;
+       array = malloc(size);
+       memset(array, 0, size);
+       if (!array) {
+               fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
+               exit(EXIT_FAIL_MEM);
+       }
+       return array;
+}
+
+static struct stats_record *alloc_stats_record(void)
+{
+       unsigned int nr_rxqs = map_data[2].def.max_entries;
+       struct stats_record *rec;
+       int i;
+
+       rec = malloc(sizeof(*rec));
+       memset(rec, 0, sizeof(*rec));
+       if (!rec) {
+               fprintf(stderr, "Mem alloc error\n");
+               exit(EXIT_FAIL_MEM);
+       }
+       rec->rxq = alloc_record_per_rxq();
+       for (i = 0; i < nr_rxqs; i++)
+               rec->rxq[i].cpu = alloc_record_per_cpu();
+
+       rec->stats.cpu = alloc_record_per_cpu();
+       return rec;
+}
+
+static void free_stats_record(struct stats_record *r)
+{
+       unsigned int nr_rxqs = map_data[2].def.max_entries;
+       int i;
+
+       for (i = 0; i < nr_rxqs; i++)
+               free(r->rxq[i].cpu);
+
+       free(r->rxq);
+       free(r->stats.cpu);
+       free(r);
+}
+
/* Read one key of a per-CPU BPF map into @rec: copy each possible
 * CPU's counters into rec->cpu[] and accumulate the sums into
 * rec->total.  Returns false (and prints an error) if the map lookup
 * fails, true otherwise.
 */
static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
{
	/* For percpu maps, userspace gets a value per possible CPU */
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec values[nr_cpus];
	__u64 sum_processed = 0;
	__u64 sum_issue = 0;
	int i;

	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
		fprintf(stderr,
			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
		return false;
	}
	/* Get time as close as possible to reading map contents */
	rec->timestamp = gettime();

	/* Record and sum values from each CPU */
	for (i = 0; i < nr_cpus; i++) {
		rec->cpu[i].processed = values[i].processed;
		sum_processed        += values[i].processed;
		rec->cpu[i].issue = values[i].issue;
		sum_issue        += values[i].issue;
	}
	/* Totals written last so they match the per-CPU snapshot above */
	rec->total.processed = sum_processed;
	rec->total.issue     = sum_issue;
	return true;
}
+
+static void stats_collect(struct stats_record *rec)
+{
+       int fd, i, max_rxqs;
+
+       fd = map_data[1].fd; /* map: stats_global_map */
+       map_collect_percpu(fd, 0, &rec->stats);
+
+       fd = map_data[2].fd; /* map: rx_queue_index_map */
+       max_rxqs = map_data[2].def.max_entries;
+       for (i = 0; i < max_rxqs; i++)
+               map_collect_percpu(fd, i, &rec->rxq[i]);
+}
+
+static double calc_period(struct record *r, struct record *p)
+{
+       double period_ = 0;
+       __u64 period = 0;
+
+       period = r->timestamp - p->timestamp;
+       if (period > 0)
+               period_ = ((double) period / NANOSEC_PER_SEC);
+
+       return period_;
+}
+
+static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
+{
+       __u64 packets = 0;
+       __u64 pps = 0;
+
+       if (period_ > 0) {
+               packets = r->processed - p->processed;
+               pps = packets / period_;
+       }
+       return pps;
+}
+
+static __u64 calc_errs_pps(struct datarec *r,
+                           struct datarec *p, double period_)
+{
+       __u64 packets = 0;
+       __u64 pps = 0;
+
+       if (period_ > 0) {
+               packets = r->issue - p->issue;
+               pps = packets / period_;
+       }
+       return pps;
+}
+
/* Pretty-print one sampling interval: per-CPU and total rates from the
 * global stats map, then per-rxq (and per-CPU within each rxq) rates
 * from rx_queue_index_map.  Rates are computed as deltas between
 * @stats_rec and @stats_prev over the measured period.
 */
static void stats_print(struct stats_record *stats_rec,
			struct stats_record *stats_prev,
			int action)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	unsigned int nr_rxqs = map_data[2].def.max_entries;
	double pps = 0, err = 0;
	struct record *rec, *prev;
	double t;
	int rxq;
	int i;

	/* Header */
	printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s\n",
	       ifname, ifindex, action2str(action));

	/* stats_global_map */
	{
		/* %' uses the thousands separator set via setlocale() */
		char *fmt_rx = "%-15s %-7d %'-11.0f %'-10.0f %s\n";
		char *fm2_rx = "%-15s %-7s %'-11.0f\n";
		char *errstr = "";

		printf("%-15s %-7s %-11s %-11s\n",
		       "XDP stats", "CPU", "pps", "issue-pps");

		rec  =  &stats_rec->stats;
		prev = &stats_prev->stats;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps     (r, p, t);
			err = calc_errs_pps(r, p, t);
			if (err > 0)
				errstr = "invalid-ifindex";
			/* Only print CPUs that actually saw traffic */
			if (pps > 0)
				printf(fmt_rx, "XDP-RX CPU",
					i, pps, err, errstr);
		}
		/* fm2_rx has only three conversions; the trailing 'err'
		 * argument is ignored by printf (harmless extra arg).
		 */
		pps  = calc_pps     (&rec->total, &prev->total, t);
		err  = calc_errs_pps(&rec->total, &prev->total, t);
		printf(fm2_rx, "XDP-RX CPU", "total", pps, err);
	}

	/* rx_queue_index_map */
	printf("\n%-15s %-7s %-11s %-11s\n",
	       "RXQ stats", "RXQ:CPU", "pps", "issue-pps");

	for (rxq = 0; rxq < nr_rxqs; rxq++) {
		char *fmt_rx = "%-15s %3d:%-3d %'-11.0f %'-10.0f %s\n";
		char *fm2_rx = "%-15s %3d:%-3s %'-11.0f\n";
		char *errstr = "";
		int rxq_ = rxq;

		/* Last RXQ in map catch overflows */
		if (rxq_ == nr_rxqs - 1)
			rxq_ = -1;	/* printed as -1 to flag the overflow slot */

		rec  =  &stats_rec->rxq[rxq];
		prev = &stats_prev->rxq[rxq];
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps     (r, p, t);
			err = calc_errs_pps(r, p, t);
			if (err > 0) {
				if (rxq_ == -1)
					errstr = "map-overflow-RXQ";
				else
					errstr = "err";
			}
			if (pps > 0)
				printf(fmt_rx, "rx_queue_index",
				       rxq_, i, pps, err, errstr);
		}
		/* Same three-conversion format as above: 'err' is an
		 * ignored extra printf argument.
		 */
		pps  = calc_pps     (&rec->total, &prev->total, t);
		err  = calc_errs_pps(&rec->total, &prev->total, t);
		if (pps || err)
			printf(fm2_rx, "rx_queue_index", rxq_, "sum", pps, err);
	}
}
+
+
/* Exchange two stats_record pointers, so the previous sample becomes
 * the scratch buffer for the next collection (classic pointer swap —
 * no record data is copied).
 */
static inline void swap(struct stats_record **a, struct stats_record **b)
{
	struct stats_record *old_a = *a;

	*a = *b;
	*b = old_a;
}
+
/* Main sampling loop: take an initial snapshot, then every @interval
 * seconds swap the record buffers, collect a fresh snapshot and print
 * the deltas.  Runs until the process is interrupted (SIGINT is
 * handled by int_exit, installed in main()).
 */
static void stats_poll(int interval, int action)
{
	struct stats_record *record, *prev;

	record = alloc_stats_record();
	prev   = alloc_stats_record();
	stats_collect(record);

	while (1) {
		swap(&prev, &record);
		stats_collect(record);
		stats_print(record, prev, action);
		sleep(interval);
	}

	/* NOTE(review): unreachable after while(1) — the loop only ends
	 * via the SIGINT handler, which exits the process.  Kept for
	 * symmetry with the allocations above.
	 */
	free_stats_record(record);
	free_stats_record(prev);
}
+
+
/* Entry point: raise RLIMIT_MEMLOCK for BPF map allocation, load the
 * companion <argv0>_kern.o object, parse command line options, store
 * the config in config_map, attach the XDP program to the chosen
 * device and poll/print statistics until interrupted.
 */
int main(int argc, char **argv)
{
	/* 10 MiB soft limit for locked memory (BPF maps are mlocked) */
	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
	bool use_separators = true;
	struct config cfg = { 0 };
	char filename[256];
	int longindex = 0;
	int interval = 2;	/* default stats interval, seconds */
	__u32 key = 0;
	int opt, err;

	char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 };
	int action = XDP_PASS; /* Default action */
	char *action_str = NULL;

	/* The BPF object file is derived from this binary's name */
	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return 1;
	}

	if (load_bpf_file(filename)) {
		fprintf(stderr, "ERR in load_bpf_file(): %s", bpf_log_buf);
		return EXIT_FAIL;
	}

	if (!prog_fd[0]) {
		fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno));
		return EXIT_FAIL;
	}

	/* Parse commands line args */
	/* NOTE(review): the short-option string is "hSd:", so 's', 'z'
	 * and 'a' below are presumably reached only through entries in
	 * long_options (defined elsewhere) — confirm against that table.
	 */
	while ((opt = getopt_long(argc, argv, "hSd:",
				  long_options, &longindex)) != -1) {
		switch (opt) {
		case 'd':
			/* Length is checked first, so the strncpy below
			 * always leaves ifname NUL-terminated.
			 */
			if (strlen(optarg) >= IF_NAMESIZE) {
				fprintf(stderr, "ERR: --dev name too long\n");
				goto error;
			}
			ifname = (char *)&ifname_buf;
			strncpy(ifname, optarg, IF_NAMESIZE);
			ifindex = if_nametoindex(ifname);
			if (ifindex == 0) {
				fprintf(stderr,
					"ERR: --dev name unknown err(%d):%s\n",
					errno, strerror(errno));
				goto error;
			}
			break;
		case 's':
			interval = atoi(optarg);
			break;
		case 'S':
			xdp_flags |= XDP_FLAGS_SKB_MODE;
			break;
		case 'z':
			use_separators = false;
			break;
		case 'a':
			/* action_str_buf is one byte larger than the copy
			 * limit and zero-initialized, so it stays terminated.
			 */
			action_str = (char *)&action_str_buf;
			strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN);
			break;
		case 'h':
		error:	/* goto target shared with the error paths above */
		default:
			usage(argv);
			return EXIT_FAIL_OPTION;
		}
	}
	/* Required option */
	if (ifindex == -1) {
		fprintf(stderr, "ERR: required option --dev missing\n");
		usage(argv);
		return EXIT_FAIL_OPTION;
	}
	cfg.ifindex = ifindex;

	/* Parse action string */
	if (action_str) {
		action = parse_xdp_action(action_str);
		if (action < 0) {
			fprintf(stderr, "ERR: Invalid XDP --action: %s\n",
				action_str);
			list_xdp_actions();
			return EXIT_FAIL_OPTION;
		}
	}
	cfg.action = action;

	/* Trick to pretty printf with thousands separators use %' */
	if (use_separators)
		setlocale(LC_NUMERIC, "en_US");

	/* User-side setup ifindex in config_map */
	err = bpf_map_update_elem(map_fd[0], &key, &cfg, 0);
	if (err) {
		fprintf(stderr, "Store config failed (err:%d)\n", err);
		exit(EXIT_FAIL_BPF);
	}

	/* Remove XDP program when program is interrupted */
	signal(SIGINT, int_exit);

	if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) {
		fprintf(stderr, "link set xdp fd failed\n");
		return EXIT_FAIL_XDP;
	}

	/* Never returns under normal operation (infinite poll loop) */
	stats_poll(interval, action);
	return EXIT_OK;
}
index b3b353d..f055ca1 100644 (file)
@@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
        return 0;
 }
 
-int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+static int __snd_rawmidi_info_select(struct snd_card *card,
+                                    struct snd_rawmidi_info *info)
 {
        struct snd_rawmidi *rmidi;
        struct snd_rawmidi_str *pstr;
        struct snd_rawmidi_substream *substream;
 
-       mutex_lock(&register_mutex);
        rmidi = snd_rawmidi_search(card, info->device);
-       mutex_unlock(&register_mutex);
        if (!rmidi)
                return -ENXIO;
        if (info->stream < 0 || info->stream > 1)
@@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info
        }
        return -ENXIO;
 }
+
+int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+{
+       int ret;
+
+       mutex_lock(&register_mutex);
+       ret = __snd_rawmidi_info_select(card, info);
+       mutex_unlock(&register_mutex);
+       return ret;
+}
 EXPORT_SYMBOL(snd_rawmidi_info_select);
 
 static int snd_rawmidi_info_select_user(struct snd_card *card,
index 038a180..cbe818e 100644 (file)
@@ -325,7 +325,7 @@ static int hdac_component_master_match(struct device *dev, void *data)
  */
 int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
 {
-       if (WARN_ON(!hdac_acomp))
+       if (!hdac_acomp)
                return -ENODEV;
 
        hdac_acomp->audio_ops = aops;
index a81aacf..37e1cf8 100644 (file)
@@ -271,6 +271,8 @@ enum {
        CXT_FIXUP_HP_SPECTRE,
        CXT_FIXUP_HP_GATE_MIC,
        CXT_FIXUP_MUTE_LED_GPIO,
+       CXT_FIXUP_HEADSET_MIC,
+       CXT_FIXUP_HP_MIC_NO_PRESENCE,
 };
 
 /* for hda_fixup_thinkpad_acpi() */
@@ -350,6 +352,18 @@ static void cxt_fixup_headphone_mic(struct hda_codec *codec,
        }
 }
 
+static void cxt_fixup_headset_mic(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct conexant_spec *spec = codec->spec;
+
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+               break;
+       }
+}
+
 /* OPLC XO 1.5 fixup */
 
 /* OLPC XO-1.5 supports DC input mode (e.g. for use with analog sensors)
@@ -880,6 +894,19 @@ static const struct hda_fixup cxt_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cxt_fixup_mute_led_gpio,
        },
+       [CXT_FIXUP_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cxt_fixup_headset_mic,
+       },
+       [CXT_FIXUP_HP_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x02a1113c },
+                       { }
+               },
+               .chained = true,
+               .chain_id = CXT_FIXUP_HEADSET_MIC,
+       },
 };
 
 static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -934,6 +961,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
        SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
        SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
index c19c81d..b4f1b6e 100644 (file)
@@ -55,10 +55,11 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
 #define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
 #define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \
                                ((codec)->core.vendor_id == 0x80862800))
+#define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c)
 #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
                                || is_skylake(codec) || is_broxton(codec) \
-                               || is_kabylake(codec)) || is_geminilake(codec)
-
+                               || is_kabylake(codec)) || is_geminilake(codec) \
+                               || is_cannonlake(codec)
 #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
 #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
 #define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
@@ -3841,6 +3842,7 @@ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI",     patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI",    patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI",    patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI",   patch_i915_hsw_hdmi),
+HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
index 4b21f71..8fd2d9c 100644 (file)
@@ -324,8 +324,12 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0292:
                alc_update_coef_idx(codec, 0x4, 1<<15, 0);
                break;
-       case 0x10ec0215:
        case 0x10ec0225:
+       case 0x10ec0295:
+       case 0x10ec0299:
+               alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
+               /* fallthrough */
+       case 0x10ec0215:
        case 0x10ec0233:
        case 0x10ec0236:
        case 0x10ec0255:
@@ -336,10 +340,8 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0286:
        case 0x10ec0288:
        case 0x10ec0285:
-       case 0x10ec0295:
        case 0x10ec0298:
        case 0x10ec0289:
-       case 0x10ec0299:
                alc_update_coef_idx(codec, 0x10, 1<<9, 0);
                break;
        case 0x10ec0275:
@@ -5185,6 +5187,22 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
        }
 }
 
+/* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */
+static void alc274_fixup_bind_dacs(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+       static hda_nid_t preferred_pairs[] = {
+               0x21, 0x03, 0x1b, 0x03, 0x16, 0x02,
+               0
+       };
+
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+
+       spec->gen.preferred_dacs = preferred_pairs;
+}
+
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
@@ -5302,6 +5320,8 @@ enum {
        ALC233_FIXUP_LENOVO_MULTI_CODECS,
        ALC294_FIXUP_LENOVO_MIC_LOCATION,
        ALC700_FIXUP_INTEL_REFERENCE,
+       ALC274_FIXUP_DELL_BIND_DACS,
+       ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6112,6 +6132,21 @@ static const struct hda_fixup alc269_fixups[] = {
                        {}
                }
        },
+       [ALC274_FIXUP_DELL_BIND_DACS] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc274_fixup_bind_dacs,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
+       [ALC274_FIXUP_DELL_AIO_LINEOUT_VERB] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1b, 0x0401102f },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC274_FIXUP_DELL_BIND_DACS
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6295,6 +6330,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -6553,6 +6589,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x1b, 0x01011020},
                {0x21, 0x02211010}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x1b, 0x01011020},
+               {0x21, 0x0221101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60160},
                {0x14, 0x90170120},
                {0x21, 0x02211030}),
@@ -6578,7 +6619,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
                {0x21, 0x03211020}),
-       SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+       SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
                {0x12, 0xb7a60130},
                {0x13, 0xb8a61140},
                {0x16, 0x90170110},
index 9f521a5..b5e41df 100644 (file)
@@ -1051,6 +1051,11 @@ static int acp_audio_probe(struct platform_device *pdev)
        struct resource *res;
        const u32 *pdata = pdev->dev.platform_data;
 
+       if (!pdata) {
+               dev_err(&pdev->dev, "Missing platform data\n");
+               return -ENODEV;
+       }
+
        audio_drv_data = devm_kzalloc(&pdev->dev, sizeof(struct audio_drv_data),
                                        GFP_KERNEL);
        if (audio_drv_data == NULL)
@@ -1058,6 +1063,8 @@ static int acp_audio_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        audio_drv_data->acp_mmio = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(audio_drv_data->acp_mmio))
+               return PTR_ERR(audio_drv_data->acp_mmio);
 
        /* The following members gets populated in device 'open'
         * function. Till then interrupts are disabled in 'acp_init'
index 4a56f3d..dcee145 100644 (file)
@@ -64,7 +64,7 @@ config SND_AT91_SOC_SAM9X5_WM8731
 config SND_ATMEL_SOC_CLASSD
        tristate "Atmel ASoC driver for boards using CLASSD"
        depends on ARCH_AT91 || COMPILE_TEST
-       select SND_ATMEL_SOC_DMA
+       select SND_SOC_GENERIC_DMAENGINE_PCM
        select REGMAP_MMIO
        help
          Say Y if you want to add support for Atmel ASoC driver for boards using
index b2d42ec..56564ce 100644 (file)
@@ -2520,7 +2520,7 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
        }
 
        if (da7218->dev_id == DA7218_DEV_ID) {
-               hpldet_np = of_find_node_by_name(np, "da7218_hpldet");
+               hpldet_np = of_get_child_by_name(np, "da7218_hpldet");
                if (!hpldet_np)
                        return pdata;
 
index 5f3c42c..066ea2f 100644 (file)
 #define MSM8916_WCD_ANALOG_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
                        SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
 #define MSM8916_WCD_ANALOG_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
-                                   SNDRV_PCM_FMTBIT_S24_LE)
+                                   SNDRV_PCM_FMTBIT_S32_LE)
 
 static int btn_mask = SND_JACK_BTN_0 | SND_JACK_BTN_1 |
               SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_BTN_4;
index a10a724..13354d6 100644 (file)
                                   SNDRV_PCM_RATE_32000 | \
                                   SNDRV_PCM_RATE_48000)
 #define MSM8916_WCD_DIGITAL_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
-                                    SNDRV_PCM_FMTBIT_S24_LE)
+                                    SNDRV_PCM_FMTBIT_S32_LE)
 
 struct msm8916_wcd_digital_priv {
        struct clk *ahbclk, *mclk;
@@ -645,7 +645,7 @@ static int msm8916_wcd_digital_hw_params(struct snd_pcm_substream *substream,
                                    RX_I2S_CTL_RX_I2S_MODE_MASK,
                                    RX_I2S_CTL_RX_I2S_MODE_16);
                break;
-       case SNDRV_PCM_FORMAT_S24_LE:
+       case SNDRV_PCM_FORMAT_S32_LE:
                snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL,
                                    TX_I2S_CTL_TX_I2S_MODE_MASK,
                                    TX_I2S_CTL_TX_I2S_MODE_32);
index 714ce17..e853a6d 100644 (file)
@@ -905,6 +905,7 @@ static int nau8825_adc_event(struct snd_soc_dapm_widget *w,
 
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
+               msleep(125);
                regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
                        NAU8825_ENABLE_ADC, NAU8825_ENABLE_ADC);
                break;
index 2df91db..64bf26c 100644 (file)
@@ -289,6 +289,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_platform *platform)
                        dev_err(&rt5514_spi->dev,
                                "%s Failed to reguest IRQ: %d\n", __func__,
                                ret);
+               else
+                       device_init_wakeup(rt5514_dsp->dev, true);
        }
 
        return 0;
@@ -456,8 +458,6 @@ static int rt5514_spi_probe(struct spi_device *spi)
                return ret;
        }
 
-       device_init_wakeup(&spi->dev, true);
-
        return 0;
 }
 
@@ -482,10 +482,13 @@ static int __maybe_unused rt5514_resume(struct device *dev)
        if (device_may_wakeup(dev))
                disable_irq_wake(irq);
 
-       if (rt5514_dsp->substream) {
-               rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf, sizeof(buf));
-               if (buf[0] & RT5514_IRQ_STATUS_BIT)
-                       rt5514_schedule_copy(rt5514_dsp);
+       if (rt5514_dsp) {
+               if (rt5514_dsp->substream) {
+                       rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf,
+                               sizeof(buf));
+                       if (buf[0] & RT5514_IRQ_STATUS_BIT)
+                               rt5514_schedule_copy(rt5514_dsp);
+               }
        }
 
        return 0;
index 2a5b5d7..2dd6e9f 100644 (file)
@@ -496,7 +496,7 @@ static const struct snd_soc_dapm_widget rt5514_dapm_widgets[] = {
        SND_SOC_DAPM_PGA("DMIC1", SND_SOC_NOPM, 0, 0, NULL, 0),
        SND_SOC_DAPM_PGA("DMIC2", SND_SOC_NOPM, 0, 0, NULL, 0),
 
-       SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0,
+       SND_SOC_DAPM_SUPPLY_S("DMIC CLK", 1, SND_SOC_NOPM, 0, 0,
                rt5514_set_dmic_clk, SND_SOC_DAPM_PRE_PMU),
 
        SND_SOC_DAPM_SUPPLY("ADC CLK", RT5514_CLK_CTRL1,
index f020d2d..edc152c 100644 (file)
@@ -3823,6 +3823,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
        regmap_read(regmap, RT5645_VENDOR_ID, &val);
        rt5645->v_id = val & 0xff;
 
+       regmap_write(rt5645->regmap, RT5645_AD_DA_MIXER, 0x8080);
+
        ret = regmap_register_patch(rt5645->regmap, init_list,
                                    ARRAY_SIZE(init_list));
        if (ret != 0)
index b036c9d..d329bf7 100644 (file)
@@ -1560,6 +1560,10 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
                        RT5663_IRQ_POW_SAV_MASK, RT5663_IRQ_POW_SAV_EN);
                snd_soc_update_bits(codec, RT5663_IRQ_1,
                        RT5663_EN_IRQ_JD1_MASK, RT5663_EN_IRQ_JD1_EN);
+               snd_soc_update_bits(codec, RT5663_EM_JACK_TYPE_1,
+                       RT5663_EM_JD_MASK, RT5663_EM_JD_RST);
+               snd_soc_update_bits(codec, RT5663_EM_JACK_TYPE_1,
+                       RT5663_EM_JD_MASK, RT5663_EM_JD_NOR);
 
                while (true) {
                        regmap_read(rt5663->regmap, RT5663_INT_ST_2, &val);
index c5a9b69..03adc80 100644 (file)
 #define RT5663_POL_EXT_JD_SHIFT                        10
 #define RT5663_POL_EXT_JD_EN                   (0x1 << 10)
 #define RT5663_POL_EXT_JD_DIS                  (0x0 << 10)
+#define RT5663_EM_JD_MASK                      (0x1 << 7)
+#define RT5663_EM_JD_SHIFT                     7
+#define RT5663_EM_JD_NOR                       (0x1 << 7)
+#define RT5663_EM_JD_RST                       (0x0 << 7)
 
 /* DACREF LDO Control (0x0112)*/
 #define RT5663_PWR_LDO_DACREFL_MASK            (0x1 << 9)
index 730fb20..1ff3edb 100644 (file)
@@ -116,7 +116,7 @@ struct aic31xx_pdata {
 /* INT2 interrupt control */
 #define AIC31XX_INT2CTRL       AIC31XX_REG(0, 49)
 /* GPIO1 control */
-#define AIC31XX_GPIO1          AIC31XX_REG(0, 50)
+#define AIC31XX_GPIO1          AIC31XX_REG(0, 51)
 
 #define AIC31XX_DACPRB         AIC31XX_REG(0, 60)
 /* ADC Instruction Set Register */
index c482b2e..cfe72b9 100644 (file)
@@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
        struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
        struct device_node *twl4030_codec_node = NULL;
 
-       twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node,
+       twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node,
                                                  "codec");
 
        if (!pdata && twl4030_codec_node) {
@@ -241,9 +241,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
                                     GFP_KERNEL);
                if (!pdata) {
                        dev_err(codec->dev, "Can not allocate memory\n");
+                       of_node_put(twl4030_codec_node);
                        return NULL;
                }
                twl4030_setup_pdata_of(pdata, twl4030_codec_node);
+               of_node_put(twl4030_codec_node);
        }
 
        return pdata;
index 65c059b..66e32f5 100644 (file)
@@ -1733,7 +1733,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
                 le64_to_cpu(footer->timestamp));
 
        while (pos < firmware->size &&
-              pos - firmware->size > sizeof(*region)) {
+              sizeof(*region) < firmware->size - pos) {
                region = (void *)&(firmware->data[pos]);
                region_name = "Unknown";
                reg = 0;
@@ -1782,8 +1782,8 @@ static int wm_adsp_load(struct wm_adsp *dsp)
                         regions, le32_to_cpu(region->len), offset,
                         region_name);
 
-               if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
-                   firmware->size) {
+               if (le32_to_cpu(region->len) >
+                   firmware->size - pos - sizeof(*region)) {
                        adsp_err(dsp,
                                 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
                                 file, regions, region_name,
@@ -2253,7 +2253,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
 
        blocks = 0;
        while (pos < firmware->size &&
-              pos - firmware->size > sizeof(*blk)) {
+              sizeof(*blk) < firmware->size - pos) {
                blk = (void *)(&firmware->data[pos]);
 
                type = le16_to_cpu(blk->type);
@@ -2327,8 +2327,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
                }
 
                if (reg) {
-                       if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
-                           firmware->size) {
+                       if (le32_to_cpu(blk->len) >
+                           firmware->size - pos - sizeof(*blk)) {
                                adsp_err(dsp,
                                         "%s.%d: %s region len %d bytes exceeds file length %zu\n",
                                         file, blocks, region_name,
index 0f163ab..52c27a3 100644 (file)
 #define ASRFSTi_OUTPUT_FIFO_SHIFT      12
 #define ASRFSTi_OUTPUT_FIFO_MASK       (((1 << ASRFSTi_OUTPUT_FIFO_WIDTH) - 1) << ASRFSTi_OUTPUT_FIFO_SHIFT)
 #define ASRFSTi_IAEi_SHIFT             11
-#define ASRFSTi_IAEi_MASK              (1 << ASRFSTi_OAFi_SHIFT)
-#define ASRFSTi_IAEi                   (1 << ASRFSTi_OAFi_SHIFT)
+#define ASRFSTi_IAEi_MASK              (1 << ASRFSTi_IAEi_SHIFT)
+#define ASRFSTi_IAEi                   (1 << ASRFSTi_IAEi_SHIFT)
 #define ASRFSTi_INPUT_FIFO_WIDTH       7
 #define ASRFSTi_INPUT_FIFO_SHIFT       0
 #define ASRFSTi_INPUT_FIFO_MASK                ((1 << ASRFSTi_INPUT_FIFO_WIDTH) - 1)
index f2f51e0..424bafa 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/ctype.h>
 #include <linux/device.h>
 #include <linux/delay.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/of.h>
@@ -265,6 +266,8 @@ struct fsl_ssi_private {
 
        u32 fifo_watermark;
        u32 dma_maxburst;
+
+       struct mutex ac97_reg_lock;
 };
 
 /*
@@ -1260,11 +1263,13 @@ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
        if (reg > 0x7f)
                return;
 
+       mutex_lock(&fsl_ac97_data->ac97_reg_lock);
+
        ret = clk_prepare_enable(fsl_ac97_data->clk);
        if (ret) {
                pr_err("ac97 write clk_prepare_enable failed: %d\n",
                        ret);
-               return;
+               goto ret_unlock;
        }
 
        lreg = reg <<  12;
@@ -1278,6 +1283,9 @@ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
        udelay(100);
 
        clk_disable_unprepare(fsl_ac97_data->clk);
+
+ret_unlock:
+       mutex_unlock(&fsl_ac97_data->ac97_reg_lock);
 }
 
 static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
@@ -1285,16 +1293,18 @@ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
 {
        struct regmap *regs = fsl_ac97_data->regs;
 
-       unsigned short val = -1;
+       unsigned short val = 0;
        u32 reg_val;
        unsigned int lreg;
        int ret;
 
+       mutex_lock(&fsl_ac97_data->ac97_reg_lock);
+
        ret = clk_prepare_enable(fsl_ac97_data->clk);
        if (ret) {
                pr_err("ac97 read clk_prepare_enable failed: %d\n",
                        ret);
-               return -1;
+               goto ret_unlock;
        }
 
        lreg = (reg & 0x7f) <<  12;
@@ -1309,6 +1319,8 @@ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
 
        clk_disable_unprepare(fsl_ac97_data->clk);
 
+ret_unlock:
+       mutex_unlock(&fsl_ac97_data->ac97_reg_lock);
        return val;
 }
 
@@ -1458,12 +1470,6 @@ static int fsl_ssi_probe(struct platform_device *pdev)
                                sizeof(fsl_ssi_ac97_dai));
 
                fsl_ac97_data = ssi_private;
-
-               ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
-               if (ret) {
-                       dev_err(&pdev->dev, "could not set AC'97 ops\n");
-                       return ret;
-               }
        } else {
                /* Initialize this copy of the CPU DAI driver structure */
                memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
@@ -1574,6 +1580,15 @@ static int fsl_ssi_probe(struct platform_device *pdev)
                        return ret;
        }
 
+       if (fsl_ssi_is_ac97(ssi_private)) {
+               mutex_init(&ssi_private->ac97_reg_lock);
+               ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+               if (ret) {
+                       dev_err(&pdev->dev, "could not set AC'97 ops\n");
+                       goto error_ac97_ops;
+               }
+       }
+
        ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
                                              &ssi_private->cpu_dai_drv, 1);
        if (ret) {
@@ -1657,6 +1672,13 @@ error_sound_card:
        fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
 
 error_asoc_register:
+       if (fsl_ssi_is_ac97(ssi_private))
+               snd_soc_set_ac97_ops(NULL);
+
+error_ac97_ops:
+       if (fsl_ssi_is_ac97(ssi_private))
+               mutex_destroy(&ssi_private->ac97_reg_lock);
+
        if (ssi_private->soc->imx)
                fsl_ssi_imx_clean(pdev, ssi_private);
 
@@ -1675,8 +1697,10 @@ static int fsl_ssi_remove(struct platform_device *pdev)
        if (ssi_private->soc->imx)
                fsl_ssi_imx_clean(pdev, ssi_private);
 
-       if (fsl_ssi_is_ac97(ssi_private))
+       if (fsl_ssi_is_ac97(ssi_private)) {
                snd_soc_set_ac97_ops(NULL);
+               mutex_destroy(&ssi_private->ac97_reg_lock);
+       }
 
        return 0;
 }
index 6f9a8bc..6dcad0a 100644 (file)
@@ -101,7 +101,7 @@ static const struct snd_soc_dapm_route kabylake_map[] = {
        { "ssp0 Tx", NULL, "spk_out" },
 
        { "AIF Playback", NULL, "ssp1 Tx" },
-       { "ssp1 Tx", NULL, "hs_out" },
+       { "ssp1 Tx", NULL, "codec1_out" },
 
        { "hs_in", NULL, "ssp1 Rx" },
        { "ssp1 Rx", NULL, "AIF Capture" },
index 6072164..271ae3c 100644 (file)
@@ -109,7 +109,7 @@ static const struct snd_soc_dapm_route kabylake_map[] = {
        { "ssp0 Tx", NULL, "spk_out" },
 
        { "AIF Playback", NULL, "ssp1 Tx" },
-       { "ssp1 Tx", NULL, "hs_out" },
+       { "ssp1 Tx", NULL, "codec1_out" },
 
        { "hs_in", NULL, "ssp1 Rx" },
        { "ssp1 Rx", NULL, "AIF Capture" },
index d14c50a..3eaac41 100644 (file)
@@ -119,11 +119,16 @@ static bool skl_check_ep_match(struct device *dev, struct nhlt_endpoint *epnt,
 
        if ((epnt->virtual_bus_id == instance_id) &&
                        (epnt->linktype == link_type) &&
-                       (epnt->direction == dirn) &&
-                       (epnt->device_type == dev_type))
-               return true;
-       else
-               return false;
+                       (epnt->direction == dirn)) {
+               /* do not check dev_type for DMIC link type */
+               if (epnt->linktype == NHLT_LINK_DMIC)
+                       return true;
+
+               if (epnt->device_type == dev_type)
+                       return true;
+       }
+
+       return false;
 }
 
 struct nhlt_specific_cfg
index a072bcf..81923da 100644 (file)
@@ -2908,7 +2908,7 @@ static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
                break;
 
        default:
-               dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
+               dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n",
                        hdr->ops.get, hdr->ops.put, hdr->ops.info);
                break;
        }
index ee5055d..a89fe9b 100644 (file)
@@ -322,26 +322,30 @@ static int rk_spdif_probe(struct platform_device *pdev)
        spdif->mclk = devm_clk_get(&pdev->dev, "mclk");
        if (IS_ERR(spdif->mclk)) {
                dev_err(&pdev->dev, "Can't retrieve rk_spdif master clock\n");
-               return PTR_ERR(spdif->mclk);
+               ret = PTR_ERR(spdif->mclk);
+               goto err_disable_hclk;
        }
 
        ret = clk_prepare_enable(spdif->mclk);
        if (ret) {
                dev_err(spdif->dev, "clock enable failed %d\n", ret);
-               return ret;
+               goto err_disable_clocks;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        regs = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(regs))
-               return PTR_ERR(regs);
+       if (IS_ERR(regs)) {
+               ret = PTR_ERR(regs);
+               goto err_disable_clocks;
+       }
 
        spdif->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "hclk", regs,
                                                  &rk_spdif_regmap_config);
        if (IS_ERR(spdif->regmap)) {
                dev_err(&pdev->dev,
                        "Failed to initialise managed register map\n");
-               return PTR_ERR(spdif->regmap);
+               ret = PTR_ERR(spdif->regmap);
+               goto err_disable_clocks;
        }
 
        spdif->playback_dma_data.addr = res->start + SPDIF_SMPDR;
@@ -373,6 +377,10 @@ static int rk_spdif_probe(struct platform_device *pdev)
 
 err_pm_runtime:
        pm_runtime_disable(&pdev->dev);
+err_disable_clocks:
+       clk_disable_unprepare(spdif->mclk);
+err_disable_hclk:
+       clk_disable_unprepare(spdif->hclk);
 
        return ret;
 }
index 8ddb087..4672688 100644 (file)
@@ -222,7 +222,7 @@ int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *cmd_mod,
                                   NULL, &val, NULL);
 
        val  = val      << shift;
-       mask = 0xffff   << shift;
+       mask = 0x0f1f   << shift;
 
        rsnd_mod_bset(adg_mod, CMDOUT_TIMSEL, mask, val);
 
@@ -250,7 +250,7 @@ int rsnd_adg_set_src_timesel_gen2(struct rsnd_mod *src_mod,
 
        in   = in       << shift;
        out  = out      << shift;
-       mask = 0xffff   << shift;
+       mask = 0x0f1f   << shift;
 
        switch (id / 2) {
        case 0:
@@ -380,7 +380,7 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
                        ckr = 0x80000000;
        }
 
-       rsnd_mod_bset(adg_mod, BRGCKR, 0x80FF0000, adg->ckr | ckr);
+       rsnd_mod_bset(adg_mod, BRGCKR, 0x80770000, adg->ckr | ckr);
        rsnd_mod_write(adg_mod, BRRA,  adg->rbga);
        rsnd_mod_write(adg_mod, BRRB,  adg->rbgb);
 
index c70eb20..f12a88a 100644 (file)
@@ -1332,8 +1332,8 @@ static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd)
 
        return snd_pcm_lib_preallocate_pages_for_all(
                rtd->pcm,
-               SNDRV_DMA_TYPE_CONTINUOUS,
-               snd_dma_continuous_data(GFP_KERNEL),
+               SNDRV_DMA_TYPE_DEV,
+               rtd->card->snd_card->dev,
                PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
 }
 
index fd557ab..4d750bd 100644 (file)
 struct rsnd_dmaen {
        struct dma_chan         *chan;
        dma_cookie_t            cookie;
-       dma_addr_t              dma_buf;
        unsigned int            dma_len;
-       unsigned int            dma_period;
-       unsigned int            dma_cnt;
 };
 
 struct rsnd_dmapp {
@@ -71,38 +68,10 @@ static struct rsnd_mod mem = {
 /*
  *             Audio DMAC
  */
-#define rsnd_dmaen_sync(dmaen, io, i)  __rsnd_dmaen_sync(dmaen, io, i, 1)
-#define rsnd_dmaen_unsync(dmaen, io, i)        __rsnd_dmaen_sync(dmaen, io, i, 0)
-static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io,
-                             int i, int sync)
-{
-       struct device *dev = dmaen->chan->device->dev;
-       enum dma_data_direction dir;
-       int is_play = rsnd_io_is_play(io);
-       dma_addr_t buf;
-       int len, max;
-       size_t period;
-
-       len     = dmaen->dma_len;
-       period  = dmaen->dma_period;
-       max     = len / period;
-       i       = i % max;
-       buf     = dmaen->dma_buf + (period * i);
-
-       dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
-       if (sync)
-               dma_sync_single_for_device(dev, buf, period, dir);
-       else
-               dma_sync_single_for_cpu(dev, buf, period, dir);
-}
-
 static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
                                  struct rsnd_dai_stream *io)
 {
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
-       struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
-       struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
        bool elapsed = false;
        unsigned long flags;
 
@@ -115,22 +84,9 @@ static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
         */
        spin_lock_irqsave(&priv->lock, flags);
 
-       if (rsnd_io_is_working(io)) {
-               rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt);
-
-               /*
-                * Next period is already started.
-                * Let's sync Next Next period
-                * see
-                *      rsnd_dmaen_start()
-                */
-               rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);
-
+       if (rsnd_io_is_working(io))
                elapsed = true;
 
-               dmaen->dma_cnt++;
-       }
-
        spin_unlock_irqrestore(&priv->lock, flags);
 
        if (elapsed)
@@ -165,14 +121,8 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod,
        struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
        struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
 
-       if (dmaen->chan) {
-               int is_play = rsnd_io_is_play(io);
-
+       if (dmaen->chan)
                dmaengine_terminate_all(dmaen->chan);
-               dma_unmap_single(dmaen->chan->device->dev,
-                                dmaen->dma_buf, dmaen->dma_len,
-                                is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-       }
 
        return 0;
 }
@@ -237,11 +187,7 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
        struct device *dev = rsnd_priv_to_dev(priv);
        struct dma_async_tx_descriptor *desc;
        struct dma_slave_config cfg = {};
-       dma_addr_t buf;
-       size_t len;
-       size_t period;
        int is_play = rsnd_io_is_play(io);
-       int i;
        int ret;
 
        cfg.direction   = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
@@ -258,19 +204,10 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
        if (ret < 0)
                return ret;
 
-       len     = snd_pcm_lib_buffer_bytes(substream);
-       period  = snd_pcm_lib_period_bytes(substream);
-       buf     = dma_map_single(dmaen->chan->device->dev,
-                                substream->runtime->dma_area,
-                                len,
-                                is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-       if (dma_mapping_error(dmaen->chan->device->dev, buf)) {
-               dev_err(dev, "dma map failed\n");
-               return -EIO;
-       }
-
        desc = dmaengine_prep_dma_cyclic(dmaen->chan,
-                                        buf, len, period,
+                                        substream->runtime->dma_addr,
+                                        snd_pcm_lib_buffer_bytes(substream),
+                                        snd_pcm_lib_period_bytes(substream),
                                         is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
@@ -282,18 +219,7 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
        desc->callback          = rsnd_dmaen_complete;
        desc->callback_param    = rsnd_mod_get(dma);
 
-       dmaen->dma_buf          = buf;
-       dmaen->dma_len          = len;
-       dmaen->dma_period       = period;
-       dmaen->dma_cnt          = 0;
-
-       /*
-        * synchronize this and next period
-        * see
-        *      __rsnd_dmaen_complete()
-        */
-       for (i = 0; i < 2; i++)
-               rsnd_dmaen_sync(dmaen, io, i);
+       dmaen->dma_len          = snd_pcm_lib_buffer_bytes(substream);
 
        dmaen->cookie = dmaengine_submit(desc);
        if (dmaen->cookie < 0) {
index fece1e5..cbf3bf3 100644 (file)
@@ -446,25 +446,29 @@ static bool rsnd_ssi_pointer_update(struct rsnd_mod *mod,
                                    int byte)
 {
        struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       bool ret = false;
+       int byte_pos;
 
-       ssi->byte_pos += byte;
+       byte_pos = ssi->byte_pos + byte;
 
-       if (ssi->byte_pos >= ssi->next_period_byte) {
+       if (byte_pos >= ssi->next_period_byte) {
                struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 
                ssi->period_pos++;
                ssi->next_period_byte += ssi->byte_per_period;
 
                if (ssi->period_pos >= runtime->periods) {
-                       ssi->byte_pos = 0;
+                       byte_pos = 0;
                        ssi->period_pos = 0;
                        ssi->next_period_byte = ssi->byte_per_period;
                }
 
-               return true;
+               ret = true;
        }
 
-       return false;
+       WRITE_ONCE(ssi->byte_pos, byte_pos);
+
+       return ret;
 }
 
 /*
@@ -838,7 +842,7 @@ static int rsnd_ssi_pointer(struct rsnd_mod *mod,
        struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 
-       *pointer = bytes_to_frames(runtime, ssi->byte_pos);
+       *pointer = bytes_to_frames(runtime, READ_ONCE(ssi->byte_pos));
 
        return 0;
 }
index 4d94875..6ff8a36 100644 (file)
@@ -125,6 +125,7 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
 {
        int hdmi = rsnd_ssi_hdmi_port(io);
        int ret;
+       u32 mode = 0;
 
        ret = rsnd_ssiu_init(mod, io, priv);
        if (ret < 0)
@@ -136,9 +137,11 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
                 * see
                 *      rsnd_ssi_config_init()
                 */
-               rsnd_mod_write(mod, SSI_MODE, 0x1);
+               mode = 0x1;
        }
 
+       rsnd_mod_write(mod, SSI_MODE, mode);
+
        if (rsnd_ssi_use_busif(io)) {
                rsnd_mod_write(mod, SSI_BUSIF_ADINR,
                               rsnd_get_adinr_bit(mod, io) |
index 7c9e361..2b4ceda 100644 (file)
@@ -2173,20 +2173,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
        kctl->private_value = (unsigned long)namelist;
        kctl->private_free = usb_mixer_selector_elem_free;
 
-       nameid = uac_selector_unit_iSelector(desc);
+       /* check the static mapping table at first */
        len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
-       if (len)
-               ;
-       else if (nameid)
-               len = snd_usb_copy_string_desc(state, nameid, kctl->id.name,
-                                        sizeof(kctl->id.name));
-       else
-               len = get_term_name(state, &state->oterm,
-                                   kctl->id.name, sizeof(kctl->id.name), 0);
-
        if (!len) {
-               strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
+               /* no mapping ? */
+               /* if iSelector is given, use it */
+               nameid = uac_selector_unit_iSelector(desc);
+               if (nameid)
+                       len = snd_usb_copy_string_desc(state, nameid,
+                                                      kctl->id.name,
+                                                      sizeof(kctl->id.name));
+               /* ... or pick up the terminal name at next */
+               if (!len)
+                       len = get_term_name(state, &state->oterm,
+                                   kctl->id.name, sizeof(kctl->id.name), 0);
+               /* ... or use the fixed string "USB" as the last resort */
+               if (!len)
+                       strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
 
+               /* and add the proper suffix */
                if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
                        append_ctl_name(kctl, " Clock Source");
                else if ((state->oterm.type & 0xff00) == 0x0100)
index 77eecaa..a66ef57 100644 (file)
@@ -1166,10 +1166,11 @@ static bool is_marantz_denon_dac(unsigned int id)
 /* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
  * between PCM/DOP and native DSD mode
  */
-static bool is_teac_50X_dac(unsigned int id)
+static bool is_teac_dsd_dac(unsigned int id)
 {
        switch (id) {
        case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+       case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
                return true;
        }
        return false;
@@ -1202,7 +1203,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
                        break;
                }
                mdelay(20);
-       } else if (is_teac_50X_dac(subs->stream->chip->usb_id)) {
+       } else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
                /* Vendor mode switch cmd is required. */
                switch (fmt->altsetting) {
                case 3: /* DSD mode (DSD_U32) requested */
@@ -1392,7 +1393,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        }
 
        /* TEAC devices with USB DAC functionality */
-       if (is_teac_50X_dac(chip->usb_id)) {
+       if (is_teac_dsd_dac(chip->usb_id)) {
                if (fp->altsetting == 3)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
        }
index cefe7c7..0a8e37a 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
 #define _UAPI__ASM_BPF_PERF_EVENT_H__
 
-#include <asm/ptrace.h>
+#include "ptrace.h"
 
 typedef user_pt_regs bpf_user_pt_regs_t;
 
index 07a6697..c8ec0ae 100644 (file)
@@ -9,6 +9,35 @@ MAKE = make
 CFLAGS += -Wall -O2
 CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
 
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+FEATURE_USER = .bpf
+FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_DISPLAY = libbfd disassembler-four-args
+
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean bpftool_clean
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+  check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+ifeq ($(feature-disassembler-four-args), 1)
+CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
 %.yacc.c: %.y
        $(YACC) -o $@ -d $<
 
index 75bf526..30044bc 100644 (file)
@@ -72,7 +72,14 @@ static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
 
        disassemble_init_for_target(&info);
 
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+       disassemble = disassembler(info.arch,
+                                  bfd_big_endian(bfdf),
+                                  info.mach,
+                                  bfdf);
+#else
        disassemble = disassembler(bfdf);
+#endif
        assert(disassemble);
 
        do {
index 45c71b1..2fe2a1b 100644 (file)
@@ -15,12 +15,12 @@ SYNOPSIS
        *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
 
        *COMMANDS* :=
-       { **list** | **attach** | **detach** | **help** }
+       { **show** | **list** | **attach** | **detach** | **help** }
 
 MAP COMMANDS
 =============
 
-|      **bpftool** **cgroup list** *CGROUP*
+|      **bpftool** **cgroup { show | list }** *CGROUP*
 |      **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
 |      **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
 |      **bpftool** **cgroup help**
@@ -31,7 +31,7 @@ MAP COMMANDS
 
 DESCRIPTION
 ===========
-       **bpftool cgroup list** *CGROUP*
+       **bpftool cgroup { show | list }** *CGROUP*
                  List all programs attached to the cgroup *CGROUP*.
 
                  Output will start with program ID followed by attach type,
index 421cabc..0ab32b3 100644 (file)
@@ -15,13 +15,13 @@ SYNOPSIS
        *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
 
        *COMMANDS* :=
-       { **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
+       { **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
        | **pin** | **help** }
 
 MAP COMMANDS
 =============
 
-|      **bpftool** **map show**   [*MAP*]
+|      **bpftool** **map { show | list }**   [*MAP*]
 |      **bpftool** **map dump**    *MAP*
 |      **bpftool** **map update**  *MAP*  **key** *BYTES*   **value** *VALUE* [*UPDATE_FLAGS*]
 |      **bpftool** **map lookup**  *MAP*  **key** *BYTES*
@@ -36,7 +36,7 @@ MAP COMMANDS
 
 DESCRIPTION
 ===========
-       **bpftool map show**   [*MAP*]
+       **bpftool map { show | list }**   [*MAP*]
                  Show information about loaded maps.  If *MAP* is specified
                  show information only about given map, otherwise list all
                  maps currently loaded on the system.
index 81c97c0..e4ceee7 100644 (file)
@@ -15,12 +15,12 @@ SYNOPSIS
        *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
 
        *COMMANDS* :=
-       { **show** | **dump xlated** | **dump jited** | **pin** | **load** | **help** }
+       { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** | **help** }
 
 MAP COMMANDS
 =============
 
-|      **bpftool** **prog show** [*PROG*]
+|      **bpftool** **prog { show | list }** [*PROG*]
 |      **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes**}]
 |      **bpftool** **prog dump jited**  *PROG* [{**file** *FILE* | **opcodes**}]
 |      **bpftool** **prog pin** *PROG* *FILE*
@@ -31,7 +31,7 @@ MAP COMMANDS
 
 DESCRIPTION
 ===========
-       **bpftool prog show** [*PROG*]
+       **bpftool prog { show | list }** [*PROG*]
                  Show information about loaded programs.  If *PROG* is
                  specified show information only about given program, otherwise
                  list all programs currently loaded on the system.
index 6732a5a..20689a3 100644 (file)
@@ -22,13 +22,13 @@ SYNOPSIS
        | { **-j** | **--json** } [{ **-p** | **--pretty** }] }
 
        *MAP-COMMANDS* :=
-       { **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
+       { **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
        | **pin** | **help** }
 
-       *PROG-COMMANDS* := { **show** | **dump jited** | **dump xlated** | **pin**
+       *PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin**
        | **load** | **help** }
 
-       *CGROUP-COMMANDS* := { **list** | **attach** | **detach** | **help** }
+       *CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }
 
 DESCRIPTION
 ===========
index 3f17ad3..2237bc4 100644 (file)
@@ -23,6 +23,8 @@ endif
 
 LIBBPF = $(BPF_PATH)libbpf.a
 
+BPFTOOL_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
+
 $(LIBBPF): FORCE
        $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
 
@@ -38,11 +40,36 @@ CC = gcc
 CFLAGS += -O2
 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow
 CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
+CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
 LIBS = -lelf -lbfd -lopcodes $(LIBBPF)
 
 INSTALL ?= install
 RM ?= rm -f
 
+FEATURE_USER = .bpftool
+FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_DISPLAY = libbfd disassembler-four-args
+
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+  check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+ifeq ($(feature-disassembler-four-args), 1)
+CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
 include $(wildcard *.d)
 
 all: $(OUTPUT)bpftool
index 7febee0..0137866 100644 (file)
@@ -197,7 +197,7 @@ _bpftool()
 
             local PROG_TYPE='id pinned tag'
             case $command in
-                show)
+                show|list)
                     [[ $prev != "$command" ]] && return 0
                     COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
                     return 0
@@ -232,7 +232,7 @@ _bpftool()
                     ;;
                 *)
                     [[ $prev == $object ]] && \
-                        COMPREPLY=( $( compgen -W 'dump help pin show' -- \
+                        COMPREPLY=( $( compgen -W 'dump help pin show list' -- \
                             "$cur" ) )
                     ;;
             esac
@@ -240,7 +240,7 @@ _bpftool()
         map)
             local MAP_TYPE='id pinned'
             case $command in
-                show|dump)
+                show|list|dump)
                     case $prev in
                         $command)
                             COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
@@ -343,7 +343,7 @@ _bpftool()
                 *)
                     [[ $prev == $object ]] && \
                         COMPREPLY=( $( compgen -W 'delete dump getnext help \
-                            lookup pin show update' -- "$cur" ) )
+                            lookup pin show list update' -- "$cur" ) )
                     ;;
             esac
             ;;
index 34ca303..cae32a6 100644 (file)
@@ -41,7 +41,7 @@ static enum bpf_attach_type parse_attach_type(const char *str)
        return __MAX_BPF_ATTACH_TYPE;
 }
 
-static int list_bpf_prog(int id, const char *attach_type_str,
+static int show_bpf_prog(int id, const char *attach_type_str,
                         const char *attach_flags_str)
 {
        struct bpf_prog_info info = {};
@@ -77,7 +77,7 @@ static int list_bpf_prog(int id, const char *attach_type_str,
        return 0;
 }
 
-static int list_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
 {
        __u32 prog_ids[1024] = {0};
        char *attach_flags_str;
@@ -111,29 +111,29 @@ static int list_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
        }
 
        for (iter = 0; iter < prog_cnt; iter++)
-               list_bpf_prog(prog_ids[iter], attach_type_strings[type],
+               show_bpf_prog(prog_ids[iter], attach_type_strings[type],
                              attach_flags_str);
 
        return 0;
 }
 
-static int do_list(int argc, char **argv)
+static int do_show(int argc, char **argv)
 {
        enum bpf_attach_type type;
        int cgroup_fd;
        int ret = -1;
 
        if (argc < 1) {
-               p_err("too few parameters for cgroup list\n");
+               p_err("too few parameters for cgroup show");
                goto exit;
        } else if (argc > 1) {
-               p_err("too many parameters for cgroup list\n");
+               p_err("too many parameters for cgroup show");
                goto exit;
        }
 
        cgroup_fd = open(argv[0], O_RDONLY);
        if (cgroup_fd < 0) {
-               p_err("can't open cgroup %s\n", argv[1]);
+               p_err("can't open cgroup %s", argv[1]);
                goto exit;
        }
 
@@ -147,10 +147,10 @@ static int do_list(int argc, char **argv)
                /*
                 * Not all attach types may be supported, so it's expected,
                 * that some requests will fail.
-                * If we were able to get the list for at least one
+                * If we were able to get the show for at least one
                 * attach type, let's return 0.
                 */
-               if (list_attached_bpf_progs(cgroup_fd, type) == 0)
+               if (show_attached_bpf_progs(cgroup_fd, type) == 0)
                        ret = 0;
        }
 
@@ -171,19 +171,19 @@ static int do_attach(int argc, char **argv)
        int i;
 
        if (argc < 4) {
-               p_err("too few parameters for cgroup attach\n");
+               p_err("too few parameters for cgroup attach");
                goto exit;
        }
 
        cgroup_fd = open(argv[0], O_RDONLY);
        if (cgroup_fd < 0) {
-               p_err("can't open cgroup %s\n", argv[1]);
+               p_err("can't open cgroup %s", argv[1]);
                goto exit;
        }
 
        attach_type = parse_attach_type(argv[1]);
        if (attach_type == __MAX_BPF_ATTACH_TYPE) {
-               p_err("invalid attach type\n");
+               p_err("invalid attach type");
                goto exit_cgroup;
        }
 
@@ -199,7 +199,7 @@ static int do_attach(int argc, char **argv)
                } else if (is_prefix(argv[i], "override")) {
                        attach_flags |= BPF_F_ALLOW_OVERRIDE;
                } else {
-                       p_err("unknown option: %s\n", argv[i]);
+                       p_err("unknown option: %s", argv[i]);
                        goto exit_cgroup;
                }
        }
@@ -229,13 +229,13 @@ static int do_detach(int argc, char **argv)
        int ret = -1;
 
        if (argc < 4) {
-               p_err("too few parameters for cgroup detach\n");
+               p_err("too few parameters for cgroup detach");
                goto exit;
        }
 
        cgroup_fd = open(argv[0], O_RDONLY);
        if (cgroup_fd < 0) {
-               p_err("can't open cgroup %s\n", argv[1]);
+               p_err("can't open cgroup %s", argv[1]);
                goto exit;
        }
 
@@ -277,7 +277,7 @@ static int do_help(int argc, char **argv)
        }
 
        fprintf(stderr,
-               "Usage: %s %s list CGROUP\n"
+               "Usage: %s %s { show | list } CGROUP\n"
                "       %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
                "       %s %s detach CGROUP ATTACH_TYPE PROG\n"
                "       %s %s help\n"
@@ -294,7 +294,8 @@ static int do_help(int argc, char **argv)
 }
 
 static const struct cmd cmds[] = {
-       { "list",       do_list },
+       { "show",       do_show },
+       { "list",       do_show },
        { "attach",     do_attach },
        { "detach",     do_detach },
        { "help",       do_help },
index b62c94e..6601c95 100644 (file)
@@ -44,7 +44,9 @@
 #include <unistd.h>
 #include <linux/limits.h>
 #include <linux/magic.h>
+#include <net/if.h>
 #include <sys/mount.h>
+#include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/vfs.h>
 
@@ -412,3 +414,53 @@ void delete_pinned_obj_table(struct pinned_obj_table *tab)
                free(obj);
        }
 }
+
+static char *
+ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
+{
+       struct stat st;
+       int err;
+
+       err = stat("/proc/self/ns/net", &st);
+       if (err) {
+               p_err("Can't stat /proc/self: %s", strerror(errno));
+               return NULL;
+       }
+
+       if (st.st_dev != ns_dev || st.st_ino != ns_ino)
+               return NULL;
+
+       return if_indextoname(ifindex, buf);
+}
+
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+       char name[IF_NAMESIZE];
+
+       if (!ifindex)
+               return;
+
+       printf(" dev ");
+       if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+               printf("%s", name);
+       else
+               printf("ifindex %u ns_dev %llu ns_ino %llu",
+                      ifindex, ns_dev, ns_inode);
+}
+
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+       char name[IF_NAMESIZE];
+
+       if (!ifindex)
+               return;
+
+       jsonw_name(json_wtr, "dev");
+       jsonw_start_object(json_wtr);
+       jsonw_uint_field(json_wtr, "ifindex", ifindex);
+       jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
+       jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
+       if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+               jsonw_string_field(json_wtr, "ifname", name);
+       jsonw_end_object(json_wtr);
+}
index 1551d39..57d32e8 100644 (file)
@@ -107,7 +107,14 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
 
        disassemble_init_for_target(&info);
 
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+       disassemble = disassembler(info.arch,
+                                  bfd_big_endian(bfdf),
+                                  info.mach,
+                                  bfdf);
+#else
        disassemble = disassembler(bfdf);
+#endif
        assert(disassemble);
 
        if (json_output)
index ecd53cc..3a0396d 100644 (file)
@@ -38,7 +38,6 @@
 #include <errno.h>
 #include <getopt.h>
 #include <linux/bpf.h>
-#include <linux/version.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -95,21 +94,13 @@ static int do_help(int argc, char **argv)
 
 static int do_version(int argc, char **argv)
 {
-       unsigned int version[3];
-
-       version[0] = LINUX_VERSION_CODE >> 16;
-       version[1] = LINUX_VERSION_CODE >> 8 & 0xf;
-       version[2] = LINUX_VERSION_CODE & 0xf;
-
        if (json_output) {
                jsonw_start_object(json_wtr);
                jsonw_name(json_wtr, "version");
-               jsonw_printf(json_wtr, "\"%u.%u.%u\"",
-                            version[0], version[1], version[2]);
+               jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
                jsonw_end_object(json_wtr);
        } else {
-               printf("%s v%u.%u.%u\n", bin_name,
-                      version[0], version[1], version[2]);
+               printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
        }
        return 0;
 }
index 8f6d3ca..65b526f 100644 (file)
@@ -96,6 +96,8 @@ struct pinned_obj {
 int build_pinned_obj_table(struct pinned_obj_table *table,
                           enum bpf_obj_type type);
 void delete_pinned_obj_table(struct pinned_obj_table *tab);
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
 
 struct cmd {
        const char *cmd;
index e2450c8..8d7db9d 100644 (file)
@@ -523,21 +523,23 @@ static int do_show(int argc, char **argv)
                                break;
                        p_err("can't get next map: %s%s", strerror(errno),
                              errno == EINVAL ? " -- kernel too old?" : "");
-                       return -1;
+                       break;
                }
 
                fd = bpf_map_get_fd_by_id(id);
                if (fd < 0) {
+                       if (errno == ENOENT)
+                               continue;
                        p_err("can't get map by id (%u): %s",
                              id, strerror(errno));
-                       return -1;
+                       break;
                }
 
                err = bpf_obj_get_info_by_fd(fd, &info, &len);
                if (err) {
                        p_err("can't get map info: %s", strerror(errno));
                        close(fd);
-                       return -1;
+                       break;
                }
 
                if (json_output)
@@ -859,7 +861,7 @@ static int do_help(int argc, char **argv)
        }
 
        fprintf(stderr,
-               "Usage: %s %s show   [MAP]\n"
+               "Usage: %s %s { show | list }   [MAP]\n"
                "       %s %s dump    MAP\n"
                "       %s %s update  MAP  key BYTES value VALUE [UPDATE_FLAGS]\n"
                "       %s %s lookup  MAP  key BYTES\n"
@@ -883,6 +885,7 @@ static int do_help(int argc, char **argv)
 
 static const struct cmd cmds[] = {
        { "show",       do_show },
+       { "list",       do_show },
        { "help",       do_help },
        { "dump",       do_dump },
        { "update",     do_update },
index 037484c..c6a28be 100644 (file)
@@ -230,6 +230,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
                     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
                     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
 
+       print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
+
        if (info->load_time) {
                char buf[32];
 
@@ -287,6 +289,7 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
 
        printf("tag ");
        fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
+       print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
        printf("\n");
 
        if (info->load_time) {
@@ -383,6 +386,8 @@ static int do_show(int argc, char **argv)
 
                fd = bpf_prog_get_fd_by_id(id);
                if (fd < 0) {
+                       if (errno == ENOENT)
+                               continue;
                        p_err("can't get prog by id (%u): %s",
                              id, strerror(errno));
                        err = -1;
@@ -401,6 +406,88 @@ static int do_show(int argc, char **argv)
        return err;
 }
 
+#define SYM_MAX_NAME   256
+
+struct kernel_sym {
+       unsigned long address;
+       char name[SYM_MAX_NAME];
+};
+
+struct dump_data {
+       unsigned long address_call_base;
+       struct kernel_sym *sym_mapping;
+       __u32 sym_count;
+       char scratch_buff[SYM_MAX_NAME];
+};
+
+static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
+{
+       return ((struct kernel_sym *)sym_a)->address -
+              ((struct kernel_sym *)sym_b)->address;
+}
+
+static void kernel_syms_load(struct dump_data *dd)
+{
+       struct kernel_sym *sym;
+       char buff[256];
+       void *tmp, *address;
+       FILE *fp;
+
+       fp = fopen("/proc/kallsyms", "r");
+       if (!fp)
+               return;
+
+       while (!feof(fp)) {
+               if (!fgets(buff, sizeof(buff), fp))
+                       break;
+               tmp = realloc(dd->sym_mapping,
+                             (dd->sym_count + 1) *
+                             sizeof(*dd->sym_mapping));
+               if (!tmp) {
+out:
+                       free(dd->sym_mapping);
+                       dd->sym_mapping = NULL;
+                       fclose(fp);
+                       return;
+               }
+               dd->sym_mapping = tmp;
+               sym = &dd->sym_mapping[dd->sym_count];
+               if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
+                       continue;
+               sym->address = (unsigned long)address;
+               if (!strcmp(sym->name, "__bpf_call_base")) {
+                       dd->address_call_base = sym->address;
+                       /* sysctl kernel.kptr_restrict was set */
+                       if (!sym->address)
+                               goto out;
+               }
+               if (sym->address)
+                       dd->sym_count++;
+       }
+
+       fclose(fp);
+
+       qsort(dd->sym_mapping, dd->sym_count,
+             sizeof(*dd->sym_mapping), kernel_syms_cmp);
+}
+
+static void kernel_syms_destroy(struct dump_data *dd)
+{
+       free(dd->sym_mapping);
+}
+
+static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
+                                            unsigned long key)
+{
+       struct kernel_sym sym = {
+               .address = key,
+       };
+
+       return dd->sym_mapping ?
+              bsearch(&sym, dd->sym_mapping, dd->sym_count,
+                      sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
+}
+
 static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 {
        va_list args;
@@ -410,8 +497,71 @@ static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
        va_end(args);
 }
 
-static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
+static const char *print_call_pcrel(struct dump_data *dd,
+                                   struct kernel_sym *sym,
+                                   unsigned long address,
+                                   const struct bpf_insn *insn)
+{
+       if (sym)
+               snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+                        "%+d#%s", insn->off, sym->name);
+       else
+               snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+                        "%+d#0x%lx", insn->off, address);
+       return dd->scratch_buff;
+}
+
+static const char *print_call_helper(struct dump_data *dd,
+                                    struct kernel_sym *sym,
+                                    unsigned long address)
+{
+       if (sym)
+               snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+                        "%s", sym->name);
+       else
+               snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+                        "0x%lx", address);
+       return dd->scratch_buff;
+}
+
+static const char *print_call(void *private_data,
+                             const struct bpf_insn *insn)
+{
+       struct dump_data *dd = private_data;
+       unsigned long address = dd->address_call_base + insn->imm;
+       struct kernel_sym *sym;
+
+       sym = kernel_syms_search(dd, address);
+       if (insn->src_reg == BPF_PSEUDO_CALL)
+               return print_call_pcrel(dd, sym, address, insn);
+       else
+               return print_call_helper(dd, sym, address);
+}
+
+static const char *print_imm(void *private_data,
+                            const struct bpf_insn *insn,
+                            __u64 full_imm)
 {
+       struct dump_data *dd = private_data;
+
+       if (insn->src_reg == BPF_PSEUDO_MAP_FD)
+               snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+                        "map[id:%u]", insn->imm);
+       else
+               snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+                        "0x%llx", (unsigned long long)full_imm);
+       return dd->scratch_buff;
+}
+
+static void dump_xlated_plain(struct dump_data *dd, void *buf,
+                             unsigned int len, bool opcodes)
+{
+       const struct bpf_insn_cbs cbs = {
+               .cb_print       = print_insn,
+               .cb_call        = print_call,
+               .cb_imm         = print_imm,
+               .private_data   = dd,
+       };
        struct bpf_insn *insn = buf;
        bool double_insn = false;
        unsigned int i;
@@ -425,7 +575,7 @@ static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
                double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
 
                printf("% 4d: ", i);
-               print_bpf_insn(print_insn, NULL, insn + i, true);
+               print_bpf_insn(&cbs, NULL, insn + i, true);
 
                if (opcodes) {
                        printf("       ");
@@ -454,8 +604,15 @@ static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
        va_end(args);
 }
 
-static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
+static void dump_xlated_json(struct dump_data *dd, void *buf,
+                            unsigned int len, bool opcodes)
 {
+       const struct bpf_insn_cbs cbs = {
+               .cb_print       = print_insn_json,
+               .cb_call        = print_call,
+               .cb_imm         = print_imm,
+               .private_data   = dd,
+       };
        struct bpf_insn *insn = buf;
        bool double_insn = false;
        unsigned int i;
@@ -470,7 +627,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 
                jsonw_start_object(json_wtr);
                jsonw_name(json_wtr, "disasm");
-               print_bpf_insn(print_insn_json, NULL, insn + i, true);
+               print_bpf_insn(&cbs, NULL, insn + i, true);
 
                if (opcodes) {
                        jsonw_name(json_wtr, "opcodes");
@@ -505,6 +662,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 static int do_dump(int argc, char **argv)
 {
        struct bpf_prog_info info = {};
+       struct dump_data dd = {};
        __u32 len = sizeof(info);
        unsigned int buf_size;
        char *filepath = NULL;
@@ -592,6 +750,14 @@ static int do_dump(int argc, char **argv)
                goto err_free;
        }
 
+       if ((member_len == &info.jited_prog_len &&
+            info.jited_prog_insns == 0) ||
+           (member_len == &info.xlated_prog_len &&
+            info.xlated_prog_insns == 0)) {
+               p_err("error retrieving insn dump: kernel.kptr_restrict set?");
+               goto err_free;
+       }
+
        if (filepath) {
                fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
                if (fd < 0) {
@@ -608,17 +774,19 @@ static int do_dump(int argc, char **argv)
                        goto err_free;
                }
        } else {
-               if (member_len == &info.jited_prog_len)
+               if (member_len == &info.jited_prog_len) {
                        disasm_print_insn(buf, *member_len, opcodes);
-               else
+               } else {
+                       kernel_syms_load(&dd);
                        if (json_output)
-                               dump_xlated_json(buf, *member_len, opcodes);
+                               dump_xlated_json(&dd, buf, *member_len, opcodes);
                        else
-                               dump_xlated_plain(buf, *member_len, opcodes);
+                               dump_xlated_plain(&dd, buf, *member_len, opcodes);
+                       kernel_syms_destroy(&dd);
+               }
        }
 
        free(buf);
-
        return 0;
 
 err_free:
@@ -645,12 +813,12 @@ static int do_load(int argc, char **argv)
                usage();
 
        if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) {
-               p_err("failed to load program\n");
+               p_err("failed to load program");
                return -1;
        }
 
        if (do_pin_fd(prog_fd, argv[1])) {
-               p_err("failed to pin program\n");
+               p_err("failed to pin program");
                return -1;
        }
 
@@ -668,7 +836,7 @@ static int do_help(int argc, char **argv)
        }
 
        fprintf(stderr,
-               "Usage: %s %s show [PROG]\n"
+               "Usage: %s %s { show | list } [PROG]\n"
                "       %s %s dump xlated PROG [{ file FILE | opcodes }]\n"
                "       %s %s dump jited  PROG [{ file FILE | opcodes }]\n"
                "       %s %s pin   PROG FILE\n"
@@ -686,6 +854,7 @@ static int do_help(int argc, char **argv)
 
 static const struct cmd cmds[] = {
        { "show",       do_show },
+       { "list",       do_show },
        { "help",       do_help },
        { "dump",       do_dump },
        { "pin",        do_pin },
index 9698264..17f2c73 100644 (file)
@@ -13,6 +13,7 @@ FILES=                                          \
          test-hello.bin                         \
          test-libaudit.bin                      \
          test-libbfd.bin                        \
+         test-disassembler-four-args.bin        \
          test-liberty.bin                       \
          test-liberty-z.bin                     \
          test-cplus-demangle.bin                \
@@ -188,6 +189,9 @@ $(OUTPUT)test-libpython-version.bin:
 $(OUTPUT)test-libbfd.bin:
        $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
 
+$(OUTPUT)test-disassembler-four-args.bin:
+       $(BUILD) -lbfd -lopcodes
+
 $(OUTPUT)test-liberty.bin:
        $(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
 
diff --git a/tools/build/feature/test-disassembler-four-args.c b/tools/build/feature/test-disassembler-four-args.c
new file mode 100644 (file)
index 0000000..45ce65c
--- /dev/null
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bfd.h>
+#include <dis-asm.h>
+
+int main(void)
+{
+       bfd *abfd = bfd_openr(NULL, NULL);
+
+       disassembler(bfd_get_arch(abfd),
+                    bfd_big_endian(abfd),
+                    bfd_get_mach(abfd),
+                    abfd);
+
+       return 0;
+}
index db1b092..4e8c60a 100644 (file)
@@ -921,6 +921,9 @@ struct bpf_prog_info {
        __u32 nr_map_ids;
        __aligned_u64 map_ids;
        char name[BPF_OBJ_NAME_LEN];
+       __u32 ifindex;
+       __u64 netns_dev;
+       __u64 netns_ino;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
index 217cf6f..a5684d0 100755 (executable)
@@ -478,7 +478,7 @@ class Provider(object):
     @staticmethod
     def is_field_wanted(fields_filter, field):
         """Indicate whether field is valid according to fields_filter."""
-        if not fields_filter or fields_filter == "help":
+        if not fields_filter:
             return True
         return re.match(fields_filter, field) is not None
 
@@ -549,8 +549,8 @@ class TracepointProvider(Provider):
 
     def update_fields(self, fields_filter):
         """Refresh fields, applying fields_filter"""
-        self._fields = [field for field in self.get_available_fields()
-                        if self.is_field_wanted(fields_filter, field)]
+        self.fields = [field for field in self.get_available_fields()
+                       if self.is_field_wanted(fields_filter, field)]
 
     @staticmethod
     def get_online_cpus():
@@ -950,7 +950,8 @@ class Tui(object):
             curses.nocbreak()
             curses.endwin()
 
-    def get_all_gnames(self):
+    @staticmethod
+    def get_all_gnames():
         """Returns a list of (pid, gname) tuples of all running guests"""
         res = []
         try:
@@ -963,7 +964,7 @@ class Tui(object):
             # perform a sanity check before calling the more expensive
             # function to possibly extract the guest name
             if ' -name ' in line[1]:
-                res.append((line[0], self.get_gname_from_pid(line[0])))
+                res.append((line[0], Tui.get_gname_from_pid(line[0])))
         child.stdout.close()
 
         return res
@@ -984,7 +985,8 @@ class Tui(object):
         except Exception:
             self.screen.addstr(row + 1, 2, 'Not available')
 
-    def get_pid_from_gname(self, gname):
+    @staticmethod
+    def get_pid_from_gname(gname):
         """Fuzzy function to convert guest name to QEMU process pid.
 
         Returns a list of potential pids, can be empty if no match found.
@@ -992,7 +994,7 @@ class Tui(object):
 
         """
         pids = []
-        for line in self.get_all_gnames():
+        for line in Tui.get_all_gnames():
             if gname == line[1]:
                 pids.append(int(line[0]))
 
@@ -1090,15 +1092,16 @@ class Tui(object):
             # sort by totals
             return (0, -stats[x][0])
         total = 0.
-        for val in stats.values():
-            total += val[0]
+        for key in stats.keys():
+            if key.find('(') is -1:
+                total += stats[key][0]
         if self._sorting == SORT_DEFAULT:
             sortkey = sortCurAvg
         else:
             sortkey = sortTotal
+        tavg = 0
         for key in sorted(stats.keys(), key=sortkey):
-
-            if row >= self.screen.getmaxyx()[0]:
+            if row >= self.screen.getmaxyx()[0] - 1:
                 break
             values = stats[key]
             if not values[0] and not values[1]:
@@ -1110,9 +1113,15 @@ class Tui(object):
                 self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
                                    (key, values[0], values[0] * 100 / total,
                                     cur))
+                if cur is not '' and key.find('(') is -1:
+                    tavg += cur
             row += 1
         if row == 3:
             self.screen.addstr(4, 1, 'No matching events reported yet')
+        else:
+            self.screen.addstr(row, 1, '%-40s %10d        %8s' %
+                               ('Total', total, tavg if tavg else ''),
+                               curses.A_BOLD)
         self.screen.refresh()
 
     def show_msg(self, text):
@@ -1358,7 +1367,7 @@ class Tui(object):
                 if char == 'x':
                     self.update_drilldown()
                     # prevents display of current values on next refresh
-                    self.stats.get()
+                    self.stats.get(self._display_guests)
             except KeyboardInterrupt:
                 break
             except curses.error:
@@ -1451,16 +1460,13 @@ Press any other key to refresh statistics immediately.
         try:
             pids = Tui.get_pid_from_gname(val)
         except:
-            raise optparse.OptionValueError('Error while searching for guest '
-                                            '"{}", use "-p" to specify a pid '
-                                            'instead'.format(val))
+            sys.exit('Error while searching for guest "{}". Use "-p" to '
+                     'specify a pid instead?'.format(val))
         if len(pids) == 0:
-            raise optparse.OptionValueError('No guest by the name "{}" '
-                                            'found'.format(val))
+            sys.exit('Error: No guest by the name "{}" found'.format(val))
         if len(pids) > 1:
-            raise optparse.OptionValueError('Multiple processes found (pids: '
-                                            '{}) - use "-p" to specify a pid '
-                                            'instead'.format(" ".join(pids)))
+            sys.exit('Error: Multiple processes found (pids: {}). Use "-p" '
+                     'to specify the desired pid'.format(" ".join(pids)))
         parser.values.pid = pids[0]
 
     optparser = optparse.OptionParser(description=description_text,
@@ -1518,7 +1524,16 @@ Press any other key to refresh statistics immediately.
                          help='restrict statistics to guest by name',
                          callback=cb_guest_to_pid,
                          )
-    (options, _) = optparser.parse_args(sys.argv)
+    options, unkn = optparser.parse_args(sys.argv)
+    if len(unkn) != 1:
+        sys.exit('Error: Extra argument(s): ' + ' '.join(unkn[1:]))
+    try:
+        # verify that we were passed a valid regex up front
+        re.compile(options.fields)
+    except re.error:
+        sys.exit('Error: "' + options.fields + '" is not a valid regular '
+                 'expression')
+
     return options
 
 
@@ -1564,16 +1579,13 @@ def main():
 
     stats = Stats(options)
 
-    if options.fields == "help":
-        event_list = "\n"
-        s = stats.get()
-        for key in s.keys():
-            if key.find('(') != -1:
-                key = key[0:key.find('(')]
-            if event_list.find('\n' + key + '\n') == -1:
-                event_list += key + '\n'
-        sys.stdout.write(event_list)
-        return ""
+    if options.fields == 'help':
+        stats.fields_filter = None
+        event_list = []
+        for key in stats.get().keys():
+            event_list.append(key.split('(', 1)[0])
+        sys.stdout.write('  ' + '\n  '.join(sorted(set(event_list))) + '\n')
+        sys.exit(0)
 
     if options.log:
         log(stats)
index e5cf836..b5b3810 100644 (file)
@@ -50,6 +50,8 @@ INTERACTIVE COMMANDS
 *s*::   set update interval
 
 *x*::  toggle reporting of stats for child trace events
+ ::     *Note*: The stats for the parents summarize the respective child trace
+                events
 
 Press any other key to refresh statistics immediately.
 
@@ -86,7 +88,7 @@ OPTIONS
 
 -f<fields>::
 --fields=<fields>::
-       fields to display (regex)
+       fields to display (regex), "-f help" for a list of available events
 
 -h::
 --help::
index 5b83875..e9c4b7c 100644 (file)
@@ -910,8 +910,9 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
                                   GELF_R_SYM(rel.r_info));
                        return -LIBBPF_ERRNO__FORMAT;
                }
-               pr_debug("relo for %ld value %ld name %d\n",
-                        rel.r_info >> 32, sym.st_value, sym.st_name);
+               pr_debug("relo for %lld value %lld name %d\n",
+                        (long long) (rel.r_info >> 32),
+                        (long long) sym.st_value, sym.st_name);
 
                if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
                        pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
index a1fcb0c..a8aa7e2 100644 (file)
@@ -11,7 +11,7 @@ ifneq ($(wildcard $(GENHDR)),)
 endif
 
 CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
-LDLIBS += -lcap -lelf
+LDLIBS += -lcap -lelf -lrt
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
        test_align test_verifier_log test_dev_cgroup
@@ -19,7 +19,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
 TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
        test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o     \
        sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
-       test_l4lb_noinline.o test_xdp_noinline.o
+       test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o
 
 TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \
        test_offload.py
@@ -41,7 +41,7 @@ $(BPFOBJ): force
 CLANG ?= clang
 LLC   ?= llc
 
-PROBE := $(shell llc -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
+PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
 
 # Let newer LLVM versions transparently probe the kernel for availability
 # of full BPF instruction set.
index 9d48973..983dd25 100644 (file)
@@ -4,3 +4,4 @@ CONFIG_NET_CLS_BPF=m
 CONFIG_BPF_EVENTS=y
 CONFIG_TEST_BPF=m
 CONFIG_CGROUP_BPF=y
+CONFIG_NETDEVSIM=m
index 02c85d6..c1535b3 100644 (file)
@@ -10,6 +10,8 @@
 #include <string.h>
 #include <errno.h>
 #include <assert.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 #include <linux/bpf.h>
 #include <bpf/bpf.h>
 
 int main(int argc, char **argv)
 {
+       struct rlimit limit  = { RLIM_INFINITY, RLIM_INFINITY };
        struct bpf_object *obj;
        int error = EXIT_FAILURE;
        int prog_fd, cgroup_fd;
        __u32 prog_cnt;
 
+       if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+               perror("Unable to lift memlock rlimit");
+
        if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
                          &obj, &prog_fd)) {
                printf("Failed to load DEV_CGROUP program\n");
-               goto err;
+               goto out;
        }
 
        if (setup_cgroup_environment()) {
@@ -89,5 +95,6 @@ int main(int argc, char **argv)
 err:
        cleanup_cgroup_environment();
 
+out:
        return error;
 }
index 3914f7a..e3c750f 100755 (executable)
@@ -18,6 +18,8 @@ import argparse
 import json
 import os
 import pprint
+import random
+import string
 import subprocess
 import time
 
@@ -27,6 +29,7 @@ bpf_test_dir = os.path.dirname(os.path.realpath(__file__))
 pp = pprint.PrettyPrinter()
 devs = [] # devices we created for clean up
 files = [] # files to be removed
+netns = [] # net namespaces to be removed
 
 def log_get_sec(level=0):
     return "*" * (log_level + level)
@@ -128,22 +131,25 @@ def rm(f):
     if f in files:
         files.remove(f)
 
-def tool(name, args, flags, JSON=True, fail=True):
+def tool(name, args, flags, JSON=True, ns="", fail=True):
     params = ""
     if JSON:
         params += "%s " % (flags["json"])
 
-    ret, out = cmd(name + " " + params + args, fail=fail)
+    if ns != "":
+        ns = "ip netns exec %s " % (ns)
+
+    ret, out = cmd(ns + name + " " + params + args, fail=fail)
     if JSON and len(out.strip()) != 0:
         return ret, json.loads(out)
     else:
         return ret, out
 
-def bpftool(args, JSON=True, fail=True):
-    return tool("bpftool", args, {"json":"-p"}, JSON=JSON, fail=fail)
+def bpftool(args, JSON=True, ns="", fail=True):
+    return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail)
 
-def bpftool_prog_list(expected=None):
-    _, progs = bpftool("prog show", JSON=True, fail=True)
+def bpftool_prog_list(expected=None, ns=""):
+    _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
     if expected is not None:
         if len(progs) != expected:
             fail(True, "%d BPF programs loaded, expected %d" %
@@ -158,13 +164,13 @@ def bpftool_prog_list_wait(expected=0, n_retry=20):
         time.sleep(0.05)
     raise Exception("Time out waiting for program counts to stabilize want %d, have %d" % (expected, nprogs))
 
-def ip(args, force=False, JSON=True, fail=True):
+def ip(args, force=False, JSON=True, ns="", fail=True):
     if force:
         args = "-force " + args
-    return tool("ip", args, {"json":"-j"}, JSON=JSON, fail=fail)
+    return tool("ip", args, {"json":"-j"}, JSON=JSON, ns=ns, fail=fail)
 
-def tc(args, JSON=True, fail=True):
-    return tool("tc", args, {"json":"-p"}, JSON=JSON, fail=fail)
+def tc(args, JSON=True, ns="", fail=True):
+    return tool("tc", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail)
 
 def ethtool(dev, opt, args, fail=True):
     return cmd("ethtool %s %s %s" % (opt, dev["ifname"], args), fail=fail)
@@ -178,6 +184,15 @@ def bpf_pinned(name):
 def bpf_bytecode(bytecode):
     return "bytecode \"%s\"" % (bytecode)
 
+def mknetns(n_retry=10):
+    for i in range(n_retry):
+        name = ''.join([random.choice(string.ascii_letters) for i in range(8)])
+        ret, _ = ip("netns add %s" % (name), fail=False)
+        if ret == 0:
+            netns.append(name)
+            return name
+    return None
+
 class DebugfsDir:
     """
     Class for accessing DebugFS directories as a dictionary.
@@ -237,6 +252,8 @@ class NetdevSim:
         self.dev = self._netdevsim_create()
         devs.append(self)
 
+        self.ns = ""
+
         self.dfs_dir = '/sys/kernel/debug/netdevsim/%s' % (self.dev['ifname'])
         self.dfs_refresh()
 
@@ -257,7 +274,7 @@ class NetdevSim:
 
     def remove(self):
         devs.remove(self)
-        ip("link del dev %s" % (self.dev["ifname"]))
+        ip("link del dev %s" % (self.dev["ifname"]), ns=self.ns)
 
     def dfs_refresh(self):
         self.dfs = DebugfsDir(self.dfs_dir)
@@ -285,6 +302,11 @@ class NetdevSim:
             time.sleep(0.05)
         raise Exception("Time out waiting for program counts to stabilize want %d/%d, have %d bound, %d loaded" % (bound, total, nbound, nprogs))
 
+    def set_ns(self, ns):
+        name = "1" if ns == "" else ns
+        ip("link set dev %s netns %s" % (self.dev["ifname"], name), ns=self.ns)
+        self.ns = ns
+
     def set_mtu(self, mtu, fail=True):
         return ip("link set dev %s mtu %d" % (self.dev["ifname"], mtu),
                   fail=fail)
@@ -372,6 +394,8 @@ def clean_up():
         dev.remove()
     for f in files:
         cmd("rm -f %s" % (f))
+    for ns in netns:
+        cmd("ip netns delete %s" % (ns))
 
 def pin_prog(file_name, idx=0):
     progs = bpftool_prog_list(expected=(idx + 1))
@@ -381,6 +405,35 @@ def pin_prog(file_name, idx=0):
 
     return file_name, bpf_pinned(file_name)
 
+def check_dev_info(other_ns, ns, pin_file=None, removed=False):
+    if removed:
+        bpftool_prog_list(expected=0)
+        ret, err = bpftool("prog show pin %s" % (pin_file), fail=False)
+        fail(ret == 0, "Showing prog with removed device did not fail")
+        fail(err["error"].find("No such device") == -1,
+             "Showing prog with removed device expected ENODEV, error is %s" %
+             (err["error"]))
+        return
+    progs = bpftool_prog_list(expected=int(not removed), ns=ns)
+    prog = progs[0]
+
+    fail("dev" not in prog.keys(), "Device parameters not reported")
+    dev = prog["dev"]
+    fail("ifindex" not in dev.keys(), "Device parameters not reported")
+    fail("ns_dev" not in dev.keys(), "Device parameters not reported")
+    fail("ns_inode" not in dev.keys(), "Device parameters not reported")
+
+    if not removed and not other_ns:
+        fail("ifname" not in dev.keys(), "Ifname not reported")
+        fail(dev["ifname"] != sim["ifname"],
+             "Ifname incorrect %s vs %s" % (dev["ifname"], sim["ifname"]))
+    else:
+        fail("ifname" in dev.keys(), "Ifname is reported for other ns")
+        if removed:
+            fail(dev["ifindex"] != 0, "Device perameters not zero on removed")
+            fail(dev["ns_dev"] != 0, "Device perameters not zero on removed")
+            fail(dev["ns_inode"] != 0, "Device perameters not zero on removed")
+
 # Parse command line
 parser = argparse.ArgumentParser()
 parser.add_argument("--log", help="output verbose log to given file")
@@ -417,6 +470,12 @@ for s in samples:
     skip(ret != 0, "sample %s/%s not found, please compile it" %
          (bpf_test_dir, s))
 
+# Check if net namespaces seem to work
+ns = mknetns()
+skip(ns is None, "Could not create a net namespace")
+cmd("ip netns delete %s" % (ns))
+netns = []
+
 try:
     obj = bpf_obj("sample_ret0.o")
     bytecode = bpf_bytecode("1,6 0 0 4294967295,")
@@ -549,6 +608,8 @@ try:
     progs = bpftool_prog_list(expected=1)
     fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"],
          "Loaded program has wrong ID")
+    fail("dev" in progs[0].keys(),
+         "Device parameters reported for non-offloaded program")
 
     start_test("Test XDP prog replace with bad flags...")
     ret, _ = sim.set_xdp(obj, "offload", force=True, fail=False)
@@ -647,8 +708,8 @@ try:
 
     start_test("Test asking for TC offload of two filters...")
     sim.cls_bpf_add_filter(obj, da=True, skip_sw=True)
-    sim.cls_bpf_add_filter(obj, da=True, skip_sw=True)
-    # The above will trigger a splat until TC cls_bpf drivers are fixed
+    ret, _ = sim.cls_bpf_add_filter(obj, da=True, skip_sw=True, fail=False)
+    fail(ret == 0, "Managed to offload two TC filters at the same time")
 
     sim.tc_flush_filters(bound=2, total=2)
 
@@ -673,6 +734,35 @@ try:
     fail(time_diff < delay_sec, "Removal process took %s, expected %s" %
          (time_diff, delay_sec))
 
+    # Remove all pinned files and reinstantiate the netdev
+    clean_up()
+    bpftool_prog_list_wait(expected=0)
+
+    sim = NetdevSim()
+    sim.set_ethtool_tc_offloads(True)
+    sim.set_xdp(obj, "offload")
+
+    start_test("Test bpftool bound info reporting (own ns)...")
+    check_dev_info(False, "")
+
+    start_test("Test bpftool bound info reporting (other ns)...")
+    ns = mknetns()
+    sim.set_ns(ns)
+    check_dev_info(True, "")
+
+    start_test("Test bpftool bound info reporting (remote ns)...")
+    check_dev_info(False, ns)
+
+    start_test("Test bpftool bound info reporting (back to own ns)...")
+    sim.set_ns("")
+    check_dev_info(False, "")
+
+    pin_file, _ = pin_prog("/sys/fs/bpf/tmp")
+    sim.remove()
+
+    start_test("Test bpftool bound info reporting (removed dev)...")
+    check_dev_info(True, "", pin_file=pin_file, removed=True)
+
     print("%s: OK" % (os.path.basename(__file__)))
 
 finally:
index 6472ca9..b549308 100644 (file)
@@ -441,7 +441,7 @@ static void test_bpf_obj_id(void)
                          info_len != sizeof(struct bpf_map_info) ||
                          strcmp((char *)map_infos[i].name, expected_map_name),
                          "get-map-info(fd)",
-                         "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
+                         "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
                          err, errno,
                          map_infos[i].type, BPF_MAP_TYPE_ARRAY,
                          info_len, sizeof(struct bpf_map_info),
@@ -485,7 +485,7 @@ static void test_bpf_obj_id(void)
                          *(int *)prog_infos[i].map_ids != map_infos[i].id ||
                          strcmp((char *)prog_infos[i].name, expected_prog_name),
                          "get-prog-info(fd)",
-                         "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
+                         "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
                          err, errno, i,
                          prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
                          info_len, sizeof(struct bpf_prog_info),
@@ -553,7 +553,7 @@ static void test_bpf_obj_id(void)
                      memcmp(&prog_info, &prog_infos[i], info_len) ||
                      *(int *)prog_info.map_ids != saved_map_id,
                      "get-prog-info(next_id->fd)",
-                     "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
+                     "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
                      err, errno, info_len, sizeof(struct bpf_prog_info),
                      memcmp(&prog_info, &prog_infos[i], info_len),
                      *(int *)prog_info.map_ids, saved_map_id);
@@ -599,7 +599,7 @@ static void test_bpf_obj_id(void)
                      memcmp(&map_info, &map_infos[i], info_len) ||
                      array_value != array_magic_value,
                      "check get-map-info(next_id->fd)",
-                     "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
+                     "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
                      err, errno, info_len, sizeof(struct bpf_map_info),
                      memcmp(&map_info, &map_infos[i], info_len),
                      array_value, array_magic_value);
@@ -837,6 +837,132 @@ static void test_tp_attach_query(void)
        free(query);
 }
 
+static int compare_map_keys(int map1_fd, int map2_fd)
+{
+       __u32 key, next_key;
+       char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)];
+       int err;
+
+       err = bpf_map_get_next_key(map1_fd, NULL, &key);
+       if (err)
+               return err;
+       err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
+       if (err)
+               return err;
+
+       while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
+               err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
+               if (err)
+                       return err;
+
+               key = next_key;
+       }
+       if (errno != ENOENT)
+               return -1;
+
+       return 0;
+}
+
+static void test_stacktrace_map()
+{
+       int control_map_fd, stackid_hmap_fd, stackmap_fd;
+       const char *file = "./test_stacktrace_map.o";
+       int bytes, efd, err, pmu_fd, prog_fd;
+       struct perf_event_attr attr = {};
+       __u32 key, val, duration = 0;
+       struct bpf_object *obj;
+       char buf[256];
+
+       err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+       if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+               goto out;
+
+       /* Get the ID for the sched/sched_switch tracepoint */
+       snprintf(buf, sizeof(buf),
+                "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+       efd = open(buf, O_RDONLY, 0);
+       if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+               goto close_prog;
+
+       bytes = read(efd, buf, sizeof(buf));
+       close(efd);
+       if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+                 "read", "bytes %d errno %d\n", bytes, errno))
+               goto close_prog;
+
+       /* Open the perf event and attach bpf program */
+       attr.config = strtol(buf, NULL, 0);
+       attr.type = PERF_TYPE_TRACEPOINT;
+       attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+       attr.sample_period = 1;
+       attr.wakeup_events = 1;
+       pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+                        0 /* cpu 0 */, -1 /* group id */,
+                        0 /* flags */);
+       if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+                 pmu_fd, errno))
+               goto close_prog;
+
+       err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+       if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+                 err, errno))
+               goto close_pmu;
+
+       err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+       if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+                 err, errno))
+               goto disable_pmu;
+
+       /* find map fds */
+       control_map_fd = bpf_find_map(__func__, obj, "control_map");
+       if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+                 "err %d errno %d\n", err, errno))
+               goto disable_pmu;
+
+       stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+       if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+                 "err %d errno %d\n", err, errno))
+               goto disable_pmu;
+
+       stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+       if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+                 err, errno))
+               goto disable_pmu;
+
+       /* give some time for bpf program run */
+       sleep(1);
+
+       /* disable stack trace collection */
+       key = 0;
+       val = 1;
+       bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+       /* for every element in stackid_hmap, we can find a corresponding one
+        * in stackmap, and vice versa.
+        */
+       err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+       if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+                 "err %d errno %d\n", err, errno))
+               goto disable_pmu;
+
+       err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+       if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+                 "err %d errno %d\n", err, errno))
+               ; /* fall through */
+
+disable_pmu:
+       ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+       close(pmu_fd);
+
+close_prog:
+       bpf_object__close(obj);
+
+out:
+       return;
+}
+
 int main(void)
 {
        struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -852,6 +978,7 @@ int main(void)
        test_pkt_md_access();
        test_obj_name();
        test_tp_attach_query();
+       test_stacktrace_map();
 
        printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
        return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_stacktrace_map.c b/tools/testing/selftests/bpf/test_stacktrace_map.c
new file mode 100644 (file)
index 0000000..76d85c5
--- /dev/null
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH         127
+#endif
+
+struct bpf_map_def SEC("maps") control_map = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(__u32),
+       .value_size = sizeof(__u32),
+       .max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") stackid_hmap = {
+       .type = BPF_MAP_TYPE_HASH,
+       .key_size = sizeof(__u32),
+       .value_size = sizeof(__u32),
+       .max_entries = 10000,
+};
+
+struct bpf_map_def SEC("maps") stackmap = {
+       .type = BPF_MAP_TYPE_STACK_TRACE,
+       .key_size = sizeof(__u32),
+       .value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
+       .max_entries = 10000,
+};
+
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+       unsigned long long pad;
+       char prev_comm[16];
+       int prev_pid;
+       int prev_prio;
+       long long prev_state;
+       char next_comm[16];
+       int next_pid;
+       int next_prio;
+};
+
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+       __u32 key = 0, val = 0, *value_p;
+
+       value_p = bpf_map_lookup_elem(&control_map, &key);
+       if (value_p && *value_p)
+               return 0; /* skip if non-zero *value_p */
+
+       /* The size of stackmap and stackid_hmap should be the same */
+       key = bpf_get_stackid(ctx, &stackmap, 0);
+       if ((int)key >= 0)
+               bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
index 3bacff0..5438479 100644 (file)
@@ -423,9 +423,7 @@ static struct bpf_test tests[] = {
                        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "R1 subtraction from stack pointer",
-               .result_unpriv = REJECT,
-               .errstr = "R1 invalid mem access",
+               .errstr = "R1 subtraction from stack pointer",
                .result = REJECT,
        },
        {
@@ -607,7 +605,6 @@ static struct bpf_test tests[] = {
                },
                .errstr = "misaligned stack access",
                .result = REJECT,
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "invalid map_fd for function call",
@@ -1798,7 +1795,6 @@ static struct bpf_test tests[] = {
                },
                .result = REJECT,
                .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "PTR_TO_STACK store/load - bad alignment on reg",
@@ -1811,7 +1807,6 @@ static struct bpf_test tests[] = {
                },
                .result = REJECT,
                .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
-               .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
        },
        {
                "PTR_TO_STACK store/load - out of bounds low",
@@ -1863,9 +1858,8 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .result_unpriv = REJECT,
-               .errstr_unpriv = "R1 pointer += pointer",
+               .result = REJECT,
+               .errstr = "R1 pointer += pointer",
        },
        {
                "unpriv: neg pointer",
@@ -2593,7 +2587,8 @@ static struct bpf_test tests[] = {
                        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                                    offsetof(struct __sk_buff, data)),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, len)),
                        BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
                        BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
                        BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2900,7 +2895,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "invalid access to packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
@@ -3886,9 +3881,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3, 11 },
-               .errstr_unpriv = "R0 pointer += pointer",
-               .errstr = "R0 invalid mem access 'inv'",
-               .result_unpriv = REJECT,
+               .errstr = "R0 pointer += pointer",
                .result = REJECT,
                .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
@@ -3929,7 +3922,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -3950,7 +3943,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -3971,7 +3964,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R4 invalid mem access",
+               .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS
        },
@@ -5196,10 +5189,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 bitwise operator &= on pointer",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 bitwise operator &= on pointer",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 2",
@@ -5215,10 +5206,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 32-bit pointer arithmetic prohibited",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 3",
@@ -5234,10 +5223,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 pointer arithmetic with /= operator",
-               .errstr = "invalid mem access 'inv'",
+               .errstr = "R0 pointer arithmetic with /= operator",
                .result = REJECT,
-               .result_unpriv = REJECT,
        },
        {
                "map element value illegal alu op, 4",
@@ -6020,8 +6007,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map_in_map = { 3 },
-               .errstr = "R1 type=inv expected=map_ptr",
-               .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
+               .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
                .result = REJECT,
        },
        {
@@ -6118,6 +6104,30 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
        },
        {
+               "ld_abs: tests on r6 and skb data reload helper",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_6, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       BPF_MOV64_IMM(BPF_REG_3, 2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_vlan_push),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+                       BPF_LD_ABS(BPF_B, 0),
+                       BPF_LD_ABS(BPF_H, 0),
+                       BPF_LD_ABS(BPF_W, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+       },
+       {
                "ld_ind: check calling conv, r1",
                .insns = {
                        BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
@@ -6301,7 +6311,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6325,7 +6335,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6351,7 +6361,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R8 invalid mem access 'inv'",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6376,7 +6386,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R8 invalid mem access 'inv'",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6424,7 +6434,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6495,7 +6505,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6546,7 +6556,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6573,7 +6583,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6599,7 +6609,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6628,7 +6638,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6658,7 +6668,7 @@ static struct bpf_test tests[] = {
                        BPF_JMP_IMM(BPF_JA, 0, 0, -7),
                },
                .fixup_map1 = { 4 },
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -6686,8 +6696,7 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 3 },
-               .errstr_unpriv = "R0 pointer comparison prohibited",
-               .errstr = "R0 min value is negative",
+               .errstr = "unbounded min value",
                .result = REJECT,
                .result_unpriv = REJECT,
        },
@@ -6743,6 +6752,462 @@ static struct bpf_test tests[] = {
                .result = REJECT,
        },
        {
+               "bounds check based on zero-extended MOV",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0x0000'0000'ffff'ffff */
+                       BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0 */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
+       {
+               "bounds check based on sign-extended MOV. test1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0xffff'ffff'ffff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0xffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+                       /* r0 = <oob pointer> */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access to OOB pointer */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 4294967295",
+               .result = REJECT
+       },
+       {
+               "bounds check based on sign-extended MOV. test2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       /* r2 = 0xffff'ffff'ffff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+                       /* r2 = 0xfff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+                       /* r0 = <oob pointer> */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       /* access to OOB pointer */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 min value is outside of the array range",
+               .result = REJECT
+       },
+       {
+               "bounds check based on reg_off + var_off + insn_off. test1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "value_size=8 off=1073741825",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "bounds check based on reg_off + var_off + insn_off. test2",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .errstr = "value 1073741823",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "bounds check after truncation of non-boundary-crossing range",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_2, 1),
+                       /* r2 = 0x10'0000'0000 */
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+                       /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+                       /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0 */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
+       {
+               "bounds check after truncation of boundary-crossing range (1)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0xffff'ffff] or
+                        *      [0x0000'0000, 0x0000'007f]
+                        */
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0x00, 0xff] or
+                        *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = 0 or
+                        *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op or OOB pointer computation */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               /* not actually fully unbounded, but the bound is very high */
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
+       {
+               "bounds check after truncation of boundary-crossing range (2)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0xffff'ff80, 0xffff'ffff] or
+                        *      [0x0000'0000, 0x0000'007f]
+                        * difference to previous test: truncation via MOV32
+                        * instead of ALU32.
+                        */
+                       BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = [0x00, 0xff] or
+                        *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+                       /* r1 = 0 or
+                        *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+                        */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* no-op or OOB pointer computation */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               /* not actually fully unbounded, but the bound is very high */
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
+       {
+               "bounds check after wrapping 32-bit addition",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       /* r1 = 0x7fff'ffff */
+                       BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0xffff'fffe */
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       /* r1 = 0 */
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+                       /* no-op */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* access at offset 0 */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT
+       },
+       {
+               "bounds check after shift with oversized count operand",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_IMM(BPF_REG_2, 32),
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       /* r1 = (u32)1 << (u32)32 = ? */
+                       BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+                       /* r1 = [0x0000, 0xffff] */
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+                       /* computes unknown pointer, potentially OOB */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 max value is outside of the array range",
+               .result = REJECT
+       },
+       {
+               "bounds check after right shift of maybe-negative number",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       /* r1 = [0x00, 0xff] */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       /* r1 = [-0x01, 0xfe] */
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+                       /* r1 = 0 or 0xff'ffff'ffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* r1 = 0 or 0xffff'ffff'ffff */
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+                       /* computes unknown pointer, potentially OOB */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       /* potentially OOB access */
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+                       /* exit */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R0 unbounded memory access",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test1",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 2147483646",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "pointer offset 1073741822",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test3",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "pointer offset -1073741822",
+               .result = REJECT
+       },
+       {
+               "bounds check map access with off+size signed 32bit overflow. test4",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_1, 1000000),
+                       BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "map_value pointer and 1000000000000",
+               .result = REJECT
+       },
+       {
+               "pointer/scalar confusion in state equality check (way 1)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_JMP_A(1),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+                       BPF_JMP_A(0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 leaks addr as return value"
+       },
+       {
+               "pointer/scalar confusion in state equality check (way 2)",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+                       BPF_JMP_A(1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 leaks addr as return value"
+       },
+       {
                "variable-offset ctx access",
                .insns = {
                        /* Get an unknown value */
@@ -6784,6 +7249,71 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_LWT_IN,
        },
        {
+               "indirect variable-offset stack access",
+               .insns = {
+                       /* Fill the top 8 bytes of the stack */
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       /* Get an unknown value */
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+                       /* Make it small and 4-byte aligned */
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+                       BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+                       /* add it to fp.  We now have either fp-4 or fp-8, but
+                        * we don't know which
+                        */
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+                       /* dereference it indirectly */
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 5 },
+               .errstr = "variable stack read R2",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_LWT_IN,
+       },
+       {
+               "direct stack access with 32-bit wraparound. test1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer and 2147483647",
+               .result = REJECT
+       },
+       {
+               "direct stack access with 32-bit wraparound. test2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer and 1073741823",
+               .result = REJECT
+       },
+       {
+               "direct stack access with 32-bit wraparound. test3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+                       BPF_MOV32_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_EXIT_INSN()
+               },
+               .errstr = "fp pointer offset 1073741822",
+               .result = REJECT
+       },
+       {
                "liveness pruning and write screening",
                .insns = {
                        /* Get an unknown value */
@@ -7105,6 +7635,19 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
+               "pkt_end - pkt_start is allowed",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
                "XDP pkt read, pkt_end mangling, bad access 1",
                .insns = {
                        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -7119,7 +7662,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "R1 offset is outside of the packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
        },
@@ -7138,7 +7681,7 @@ static struct bpf_test tests[] = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "R1 offset is outside of the packet",
+               .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
        },
@@ -8730,6 +9273,196 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
        },
        {
+               "calls: stack overflow using two frames (pre-call access)",
+               .insns = {
+                       /* prog 1 */
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+                       BPF_EXIT_INSN(),
+
+                       /* prog 2 */
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .errstr = "combined stack size",
+               .result = REJECT,
+       },
+       {
+               "calls: stack overflow using two frames (post-call access)",
+               .insns = {
+                       /* prog 1 */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+                       BPF_EXIT_INSN(),
+
+                       /* prog 2 */
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .errstr = "combined stack size",
+               .result = REJECT,
+       },
+       {
+               "calls: stack depth check using three frames. test1",
+               .insns = {
+                       /* main */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       /* A */
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+                       BPF_EXIT_INSN(),
+                       /* B */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               /* stack_main=32, stack_A=256, stack_B=64
+                * and max(main+A, main+A+B) < 512
+                */
+               .result = ACCEPT,
+       },
+       {
+               "calls: stack depth check using three frames. test2",
+               .insns = {
+                       /* main */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       /* A */
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+                       BPF_EXIT_INSN(),
+                       /* B */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               /* stack_main=32, stack_A=64, stack_B=256
+                * and max(main+A, main+A+B) < 512
+                */
+               .result = ACCEPT,
+       },
+       {
+               "calls: stack depth check using three frames. test3",
+               .insns = {
+                       /* main */
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       /* A */
+                       BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+                       /* B */
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               /* stack_main=64, stack_A=224, stack_B=256
+                * and max(main+A, main+A+B) > 512
+                */
+               .errstr = "combined stack",
+               .result = REJECT,
+       },
+       {
+               "calls: stack depth check using three frames. test4",
+               /* void main(void) {
+                *   func1(0);
+                *   func1(1);
+                *   func2(1);
+                * }
+                * void func1(int alloc_or_recurse) {
+                *   if (alloc_or_recurse) {
+                *     frame_pointer[-300] = 1;
+                *   } else {
+                *     func2(alloc_or_recurse);
+                *   }
+                * }
+                * void func2(int alloc_or_recurse) {
+                *   if (alloc_or_recurse) {
+                *     frame_pointer[-300] = 1;
+                *   }
+                * }
+                */
+               .insns = {
+                       /* main */
+                       BPF_MOV64_IMM(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       /* A */
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+                       BPF_EXIT_INSN(),
+                       /* B */
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .result = REJECT,
+               .errstr = "combined stack",
+       },
+       {
+               "calls: stack depth check using three frames. test5",
+               .insns = {
+                       /* main */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+                       BPF_EXIT_INSN(),
+                       /* A */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+                       BPF_EXIT_INSN(),
+                       /* B */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+                       BPF_EXIT_INSN(),
+                       /* C */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+                       BPF_EXIT_INSN(),
+                       /* D */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+                       BPF_EXIT_INSN(),
+                       /* E */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+                       BPF_EXIT_INSN(),
+                       /* F */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+                       BPF_EXIT_INSN(),
+                       /* G */
+                       BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+                       BPF_EXIT_INSN(),
+                       /* H */
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_XDP,
+               .errstr = "call stack",
+               .result = REJECT,
+       },
+       {
                "calls: spill into caller stack frame",
                .insns = {
                        BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
@@ -9715,6 +10448,57 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
        },
+       {
+               "search pruning: all branches should be verified (nop operation)",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_A(1),
+                       BPF_MOV64_IMM(BPF_REG_4, 1),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+                       BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+                       BPF_MOV64_IMM(BPF_REG_6, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "R6 invalid mem access 'inv'",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "search pruning: all branches should be verified (invalid stack access)",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+                       BPF_JMP_A(1),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+                       BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .errstr = "invalid read from stack off -16+0 size 8",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
index 500c74d..d7c30d3 100644 (file)
@@ -5,6 +5,7 @@ CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g
 CFLAGS += -I../../../../usr/include/
 
 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
+TEST_PROGS += fib_tests.sh
 TEST_GEN_FILES =  socket
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
index e57b4ac..7177bea 100644 (file)
@@ -1,3 +1,4 @@
 CONFIG_USER_NS=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_TEST_BPF=m
+CONFIG_NUMA=y
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
new file mode 100755 (executable)
index 0000000..a9154ee
--- /dev/null
@@ -0,0 +1,429 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test is for checking IPv4 and IPv6 FIB behavior in response to
+# different events.
+
+ret=0
+
+check_err()
+{
+       if [ $ret -eq 0 ]; then
+               ret=$1
+       fi
+}
+
+check_fail()
+{
+       if [ $1 -eq 0 ]; then
+               ret=1
+       fi
+}
+
+netns_create()
+{
+       local testns=$1
+
+       ip netns add $testns
+       ip netns exec $testns ip link set dev lo up
+}
+
+fib_unreg_unicast_test()
+{
+       ret=0
+
+       netns_create "testns"
+
+       ip netns exec testns ip link add dummy0 type dummy
+       ip netns exec testns ip link set dev dummy0 up
+
+       ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+       ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+       ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+       check_err $?
+
+       ip netns exec testns ip link del dev dummy0
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+       check_fail $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+       check_fail $?
+
+       ip netns del testns
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: unicast route test"
+               return 1
+       fi
+       echo "PASS: unicast route test"
+}
+
+fib_unreg_multipath_test()
+{
+       ret=0
+
+       netns_create "testns"
+
+       ip netns exec testns ip link add dummy0 type dummy
+       ip netns exec testns ip link set dev dummy0 up
+
+       ip netns exec testns ip link add dummy1 type dummy
+       ip netns exec testns ip link set dev dummy1 up
+
+       ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+       ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+       ip netns exec testns ip address add 192.0.2.1/24 dev dummy1
+       ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1
+
+       ip netns exec testns ip route add 203.0.113.0/24 \
+               nexthop via 198.51.100.2 dev dummy0 \
+               nexthop via 192.0.2.2 dev dummy1
+       ip netns exec testns ip -6 route add 2001:db8:3::/64 \
+               nexthop via 2001:db8:1::2 dev dummy0 \
+               nexthop via 2001:db8:2::2 dev dummy1
+
+       ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+       check_err $?
+
+       ip netns exec testns ip link del dev dummy0
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+       check_fail $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+       # In IPv6 we do not flush the entire multipath route.
+       check_err $?
+
+       ip netns exec testns ip link del dev dummy1
+
+       ip netns del testns
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: multipath route test"
+               return 1
+       fi
+       echo "PASS: multipath route test"
+}
+
+fib_unreg_test()
+{
+       echo "Running netdev unregister tests"
+
+       fib_unreg_unicast_test
+       fib_unreg_multipath_test
+}
+
+fib_down_unicast_test()
+{
+       ret=0
+
+       netns_create "testns"
+
+       ip netns exec testns ip link add dummy0 type dummy
+       ip netns exec testns ip link set dev dummy0 up
+
+       ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+       ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+       ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+       check_err $?
+
+       ip netns exec testns ip link set dev dummy0 down
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+       check_fail $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+       check_fail $?
+
+       ip netns exec testns ip link del dev dummy0
+
+       ip netns del testns
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: unicast route test"
+               return 1
+       fi
+       echo "PASS: unicast route test"
+}
+
+fib_down_multipath_test_do()
+{
+       local down_dev=$1
+       local up_dev=$2
+
+       ip netns exec testns ip route get fibmatch 203.0.113.1 \
+               oif $down_dev &> /dev/null
+       check_fail $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \
+               oif $down_dev &> /dev/null
+       check_fail $?
+
+       ip netns exec testns ip route get fibmatch 203.0.113.1 \
+               oif $up_dev &> /dev/null
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \
+               oif $up_dev &> /dev/null
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 203.0.113.1 | \
+               grep $down_dev | grep -q "dead linkdown"
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \
+               grep $down_dev | grep -q "dead linkdown"
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 203.0.113.1 | \
+               grep $up_dev | grep -q "dead linkdown"
+       check_fail $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \
+               grep $up_dev | grep -q "dead linkdown"
+       check_fail $?
+}
+
+fib_down_multipath_test()
+{
+       ret=0
+
+       netns_create "testns"
+
+       ip netns exec testns ip link add dummy0 type dummy
+       ip netns exec testns ip link set dev dummy0 up
+
+       ip netns exec testns ip link add dummy1 type dummy
+       ip netns exec testns ip link set dev dummy1 up
+
+       ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+       ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+       ip netns exec testns ip address add 192.0.2.1/24 dev dummy1
+       ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1
+
+       ip netns exec testns ip route add 203.0.113.0/24 \
+               nexthop via 198.51.100.2 dev dummy0 \
+               nexthop via 192.0.2.2 dev dummy1
+       ip netns exec testns ip -6 route add 2001:db8:3::/64 \
+               nexthop via 2001:db8:1::2 dev dummy0 \
+               nexthop via 2001:db8:2::2 dev dummy1
+
+       ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+       check_err $?
+
+       ip netns exec testns ip link set dev dummy0 down
+       check_err $?
+
+       fib_down_multipath_test_do "dummy0" "dummy1"
+
+       ip netns exec testns ip link set dev dummy0 up
+       check_err $?
+       ip netns exec testns ip link set dev dummy1 down
+       check_err $?
+
+       fib_down_multipath_test_do "dummy1" "dummy0"
+
+       ip netns exec testns ip link set dev dummy0 down
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+       check_fail $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+       check_fail $?
+
+       ip netns exec testns ip link del dev dummy1
+       ip netns exec testns ip link del dev dummy0
+
+       ip netns del testns
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: multipath route test"
+               return 1
+       fi
+       echo "PASS: multipath route test"
+}
+
+fib_down_test()
+{
+       echo "Running netdev down tests"
+
+       fib_down_unicast_test
+       fib_down_multipath_test
+}
+
+fib_carrier_local_test()
+{
+       ret=0
+
+       # Local routes should not be affected when carrier changes.
+       netns_create "testns"
+
+       ip netns exec testns ip link add dummy0 type dummy
+       ip netns exec testns ip link set dev dummy0 up
+
+       ip netns exec testns ip link set dev dummy0 carrier on
+
+       ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+       ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+       ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 198.51.100.1 | \
+               grep -q "linkdown"
+       check_fail $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \
+               grep -q "linkdown"
+       check_fail $?
+
+       ip netns exec testns ip link set dev dummy0 carrier off
+
+       ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 198.51.100.1 | \
+               grep -q "linkdown"
+       check_fail $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \
+               grep -q "linkdown"
+       check_fail $?
+
+       ip netns exec testns ip address add 192.0.2.1/24 dev dummy0
+       ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0
+
+       ip netns exec testns ip route get fibmatch 192.0.2.1 &> /dev/null
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 &> /dev/null
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 192.0.2.1 | \
+               grep -q "linkdown"
+       check_fail $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 | \
+               grep -q "linkdown"
+       check_fail $?
+
+       ip netns exec testns ip link del dev dummy0
+
+       ip netns del testns
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: local route carrier test"
+               return 1
+       fi
+       echo "PASS: local route carrier test"
+}
+
+fib_carrier_unicast_test()
+{
+       ret=0
+
+       netns_create "testns"
+
+       ip netns exec testns ip link add dummy0 type dummy
+       ip netns exec testns ip link set dev dummy0 up
+
+       ip netns exec testns ip link set dev dummy0 carrier on
+
+       ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+       ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+       ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 198.51.100.2 | \
+               grep -q "linkdown"
+       check_fail $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \
+               grep -q "linkdown"
+       check_fail $?
+
+       ip netns exec testns ip link set dev dummy0 carrier off
+
+       ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 198.51.100.2 | \
+               grep -q "linkdown"
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \
+               grep -q "linkdown"
+       check_err $?
+
+       ip netns exec testns ip address add 192.0.2.1/24 dev dummy0
+       ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0
+
+       ip netns exec testns ip route get fibmatch 192.0.2.2 &> /dev/null
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 &> /dev/null
+       check_err $?
+
+       ip netns exec testns ip route get fibmatch 192.0.2.2 | \
+               grep -q "linkdown"
+       check_err $?
+       ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 | \
+               grep -q "linkdown"
+       check_err $?
+
+       ip netns exec testns ip link del dev dummy0
+
+       ip netns del testns
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: unicast route carrier test"
+               return 1
+       fi
+       echo "PASS: unicast route carrier test"
+}
+
+fib_carrier_test()
+{
+       echo "Running netdev carrier change tests"
+
+       fib_carrier_local_test
+       fib_carrier_unicast_test
+}
+
+fib_test()
+{
+       fib_unreg_test
+       fib_down_test
+       fib_carrier_test
+}
+
+if [ "$(id -u)" -ne 0 ];then
+       echo "SKIP: Need root privileges"
+       exit 0
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+       echo "SKIP: Could not run test without ip tool"
+       exit 0
+fi
+
+ip route help 2>&1 | grep -q fibmatch
+if [ $? -ne 0 ]; then
+       echo "SKIP: iproute2 too old, missing fibmatch"
+       exit 0
+fi
+
+fib_test
+
+exit $ret
index 3ab6ec4..e11fe84 100644 (file)
@@ -259,22 +259,28 @@ static int setup_ip6h(struct ipv6hdr *ip6h, uint16_t payload_len)
        return sizeof(*ip6h);
 }
 
-static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr)
+
+static void setup_sockaddr(int domain, const char *str_addr,
+                          struct sockaddr_storage *sockaddr)
 {
        struct sockaddr_in6 *addr6 = (void *) sockaddr;
        struct sockaddr_in *addr4 = (void *) sockaddr;
 
        switch (domain) {
        case PF_INET:
+               memset(addr4, 0, sizeof(*addr4));
                addr4->sin_family = AF_INET;
                addr4->sin_port = htons(cfg_port);
-               if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
+               if (str_addr &&
+                   inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
                        error(1, 0, "ipv4 parse error: %s", str_addr);
                break;
        case PF_INET6:
+               memset(addr6, 0, sizeof(*addr6));
                addr6->sin6_family = AF_INET6;
                addr6->sin6_port = htons(cfg_port);
-               if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
+               if (str_addr &&
+                   inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
                        error(1, 0, "ipv6 parse error: %s", str_addr);
                break;
        default:
@@ -603,6 +609,7 @@ static void parse_opts(int argc, char **argv)
                                    sizeof(struct tcphdr) -
                                    40 /* max tcp options */;
        int c;
+       char *daddr = NULL, *saddr = NULL;
 
        cfg_payload_len = max_payload_len;
 
@@ -627,7 +634,7 @@ static void parse_opts(int argc, char **argv)
                        cfg_cpu = strtol(optarg, NULL, 0);
                        break;
                case 'D':
-                       setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
+                       daddr = optarg;
                        break;
                case 'i':
                        cfg_ifindex = if_nametoindex(optarg);
@@ -638,7 +645,7 @@ static void parse_opts(int argc, char **argv)
                        cfg_cork_mixed = true;
                        break;
                case 'p':
-                       cfg_port = htons(strtoul(optarg, NULL, 0));
+                       cfg_port = strtoul(optarg, NULL, 0);
                        break;
                case 'r':
                        cfg_rx = true;
@@ -647,7 +654,7 @@ static void parse_opts(int argc, char **argv)
                        cfg_payload_len = strtoul(optarg, NULL, 0);
                        break;
                case 'S':
-                       setup_sockaddr(cfg_family, optarg, &cfg_src_addr);
+                       saddr = optarg;
                        break;
                case 't':
                        cfg_runtime_ms = 200 + strtoul(optarg, NULL, 10) * 1000;
@@ -660,6 +667,8 @@ static void parse_opts(int argc, char **argv)
                        break;
                }
        }
+       setup_sockaddr(cfg_family, daddr, &cfg_dst_addr);
+       setup_sockaddr(cfg_family, saddr, &cfg_src_addr);
 
        if (cfg_payload_len > max_payload_len)
                error(1, 0, "-s: payload exceeds max (%d)", max_payload_len);
index 5215493..a622eee 100755 (executable)
@@ -502,6 +502,231 @@ kci_test_macsec()
        echo "PASS: macsec"
 }
 
+kci_test_gretap()
+{
+       testns="testns"
+       DEV_NS=gretap00
+       ret=0
+
+       ip netns add "$testns"
+       if [ $? -ne 0 ]; then
+               echo "SKIP gretap tests: cannot add net namespace $testns"
+               return 1
+       fi
+
+       ip link help gretap 2>&1 | grep -q "^Usage:"
+       if [ $? -ne 0 ];then
+               echo "SKIP: gretap: iproute2 too old"
+               return 1
+       fi
+
+       # test native tunnel
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap seq \
+               key 102 local 172.16.1.100 remote 172.16.1.200
+       check_err $?
+
+       ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+       check_err $?
+
+       ip netns exec "$testns" ip link set dev $DEV_NS up
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       # test external mode
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap external
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: gretap"
+               return 1
+       fi
+       echo "PASS: gretap"
+
+       ip netns del "$testns"
+}
+
+kci_test_ip6gretap()
+{
+       testns="testns"
+       DEV_NS=ip6gretap00
+       ret=0
+
+       ip netns add "$testns"
+       if [ $? -ne 0 ]; then
+               echo "SKIP ip6gretap tests: cannot add net namespace $testns"
+               return 1
+       fi
+
+       ip link help ip6gretap 2>&1 | grep -q "^Usage:"
+       if [ $? -ne 0 ];then
+               echo "SKIP: ip6gretap: iproute2 too old"
+               return 1
+       fi
+
+       # test native tunnel
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap seq \
+               key 102 local fc00:100::1 remote fc00:100::2
+       check_err $?
+
+       ip netns exec "$testns" ip addr add dev "$DEV_NS" fc00:200::1/96
+       check_err $?
+
+       ip netns exec "$testns" ip link set dev $DEV_NS up
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       # test external mode
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap external
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: ip6gretap"
+               return 1
+       fi
+       echo "PASS: ip6gretap"
+
+       ip netns del "$testns"
+}
+
+kci_test_erspan()
+{
+       testns="testns"
+       DEV_NS=erspan00
+       ret=0
+
+       ip link help erspan 2>&1 | grep -q "^Usage:"
+       if [ $? -ne 0 ];then
+               echo "SKIP: erspan: iproute2 too old"
+               return 1
+       fi
+
+       ip netns add "$testns"
+       if [ $? -ne 0 ]; then
+               echo "SKIP erspan tests: cannot add net namespace $testns"
+               return 1
+       fi
+
+       # test native tunnel erspan v1
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+               key 102 local 172.16.1.100 remote 172.16.1.200 \
+               erspan_ver 1 erspan 488
+       check_err $?
+
+       ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+       check_err $?
+
+       ip netns exec "$testns" ip link set dev $DEV_NS up
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       # test native tunnel erspan v2
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+               key 102 local 172.16.1.100 remote 172.16.1.200 \
+               erspan_ver 2 erspan_dir ingress erspan_hwid 7
+       check_err $?
+
+       ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+       check_err $?
+
+       ip netns exec "$testns" ip link set dev $DEV_NS up
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       # test external mode
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan external
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: erspan"
+               return 1
+       fi
+       echo "PASS: erspan"
+
+       ip netns del "$testns"
+}
+
+kci_test_ip6erspan()
+{
+       testns="testns"
+       DEV_NS=ip6erspan00
+       ret=0
+
+       ip link help ip6erspan 2>&1 | grep -q "^Usage:"
+       if [ $? -ne 0 ];then
+               echo "SKIP: ip6erspan: iproute2 too old"
+               return 1
+       fi
+
+       ip netns add "$testns"
+       if [ $? -ne 0 ]; then
+               echo "SKIP ip6erspan tests: cannot add net namespace $testns"
+               return 1
+       fi
+
+       # test native tunnel ip6erspan v1
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+               key 102 local fc00:100::1 remote fc00:100::2 \
+               erspan_ver 1 erspan 488
+       check_err $?
+
+       ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+       check_err $?
+
+       ip netns exec "$testns" ip link set dev $DEV_NS up
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       # test native tunnel ip6erspan v2
+       ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+               key 102 local fc00:100::1 remote fc00:100::2 \
+               erspan_ver 2 erspan_dir ingress erspan_hwid 7
+       check_err $?
+
+       ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+       check_err $?
+
+       ip netns exec "$testns" ip link set dev $DEV_NS up
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       # test external mode
+       ip netns exec "$testns" ip link add dev "$DEV_NS" \
+               type ip6erspan external
+       check_err $?
+
+       ip netns exec "$testns" ip link del "$DEV_NS"
+       check_err $?
+
+       if [ $ret -ne 0 ]; then
+               echo "FAIL: ip6erspan"
+               return 1
+       fi
+       echo "PASS: ip6erspan"
+
+       ip netns del "$testns"
+}
+
 kci_test_rtnl()
 {
        kci_add_dummy
@@ -514,6 +739,10 @@ kci_test_rtnl()
        kci_test_route_get
        kci_test_tc
        kci_test_gre
+       kci_test_gretap
+       kci_test_ip6gretap
+       kci_test_erspan
+       kci_test_ip6erspan
        kci_test_bridge
        kci_test_addrlabel
        kci_test_ifalias
index 66e5ce5..0304ffb 100644 (file)
@@ -627,13 +627,10 @@ static void do_multicpu_tests(void)
 static int finish_exec_test(void)
 {
        /*
-        * In a sensible world, this would be check_invalid_segment(0, 1);
-        * For better or for worse, though, the LDT is inherited across exec.
-        * We can probably change this safely, but for now we test it.
+        * Older kernel versions did inherit the LDT on exec() which is
+        * wrong because exec() starts from a clean state.
         */
-       check_valid_segment(0, 1,
-                           AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB,
-                           42, true);
+       check_invalid_segment(0, 1);
 
        return nerrs ? 1 : 0;
 }
index f9555b1..cc29a81 100644 (file)
@@ -92,16 +92,23 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
        struct arch_timer_context *vtimer;
+       u32 cnt_ctl;
 
-       if (!vcpu) {
-               pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n");
-               return IRQ_NONE;
-       }
-       vtimer = vcpu_vtimer(vcpu);
+       /*
+        * We may see a timer interrupt after vcpu_put() has been called which
+        * sets the CPU's vcpu pointer to NULL, because even though the timer
+        * has been disabled in vtimer_save_state(), the hardware interrupt
+        * signal may not have been retired from the interrupt controller yet.
+        */
+       if (!vcpu)
+               return IRQ_HANDLED;
 
+       vtimer = vcpu_vtimer(vcpu);
        if (!vtimer->irq.level) {
-               vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-               if (kvm_timer_irq_can_fire(vtimer))
+               cnt_ctl = read_sysreg_el0(cntv_ctl);
+               cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
+                          ARCH_TIMER_CTRL_IT_MASK;
+               if (cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
                        kvm_timer_update_irq(vcpu, true, vtimer);
        }
 
@@ -355,6 +362,7 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 
        /* Disable the virtual timer */
        write_sysreg_el0(0, cntv_ctl);
+       isb();
 
        vtimer->loaded = false;
 out:
@@ -720,7 +728,7 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
        return 0;
 }
 
-int kvm_timer_hyp_init(void)
+int kvm_timer_hyp_init(bool has_gic)
 {
        struct arch_timer_kvm_info *info;
        int err;
@@ -756,10 +764,13 @@ int kvm_timer_hyp_init(void)
                return err;
        }
 
-       err = irq_set_vcpu_affinity(host_vtimer_irq, kvm_get_running_vcpus());
-       if (err) {
-               kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
-               goto out_free_irq;
+       if (has_gic) {
+               err = irq_set_vcpu_affinity(host_vtimer_irq,
+                                           kvm_get_running_vcpus());
+               if (err) {
+                       kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
+                       goto out_free_irq;
+               }
        }
 
        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
@@ -835,10 +846,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 no_vgic:
        preempt_disable();
        timer->enabled = 1;
-       if (!irqchip_in_kernel(vcpu->kvm))
-               kvm_timer_vcpu_load_user(vcpu);
-       else
-               kvm_timer_vcpu_load_vgic(vcpu);
+       kvm_timer_vcpu_load(vcpu);
        preempt_enable();
 
        return 0;
index 6b60c98..2e43f9d 100644 (file)
@@ -1326,7 +1326,7 @@ static int init_subsystems(void)
        /*
         * Init HYP architected timer support
         */
-       err = kvm_timer_hyp_init();
+       err = kvm_timer_hyp_init(vgic_present);
        if (err)
                goto out;
 
index b6e715f..dac7ceb 100644 (file)
@@ -112,7 +112,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
                }
 
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-                              data);
+                              &data);
                data = vcpu_data_host_to_guest(vcpu, data, len);
                vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
        }
@@ -182,14 +182,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
                                               len);
 
-               trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+               trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
                kvm_mmio_write_buf(data_buf, len, data);
 
                ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                       data_buf);
        } else {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
-                              fault_ipa, 0);
+                              fault_ipa, NULL);
 
                ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                      data_buf);
index b36945d..b4b69c2 100644 (file)
@@ -509,8 +509,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
  */
 void free_hyp_pgds(void)
 {
-       unsigned long addr;
-
        mutex_lock(&kvm_hyp_pgd_mutex);
 
        if (boot_hyp_pgd) {
@@ -521,10 +519,10 @@ void free_hyp_pgds(void)
 
        if (hyp_pgd) {
                unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-               for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
-               for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-                       unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
+               unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
+                               (uintptr_t)high_memory - PAGE_OFFSET);
+               unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START),
+                               VMALLOC_END - VMALLOC_START);
 
                free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
                hyp_pgd = NULL;